/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

#define CREATE_TRACE_POINTS
#include "hv_trace_balloon.h"

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */


/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
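
/*
 * Worked example (illustrative only): DYNMEM_MAKE_VERSION(2, 0) packs to
 * 0x00020000, so DYNMEM_MAJOR_VERSION(0x00020000) is 2 and
 * DYNMEM_MINOR_VERSION(0x00020000) is 0. Note that the minor-version
 * accessor masks only the low byte, which is sufficient for the minor
 * numbers (0 and 3) this protocol actually uses.
 */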

enum {
        DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
        DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
        DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

        DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
        DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
        DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

        DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};


/*
 * Message Types
 */

enum dm_message_type {
        /*
         * Version 0.3
         */
        DM_ERROR = 0,
        DM_VERSION_REQUEST = 1,
        DM_VERSION_RESPONSE = 2,
        DM_CAPABILITIES_REPORT = 3,
        DM_CAPABILITIES_RESPONSE = 4,
        DM_STATUS_REPORT = 5,
        DM_BALLOON_REQUEST = 6,
        DM_BALLOON_RESPONSE = 7,
        DM_UNBALLOON_REQUEST = 8,
        DM_UNBALLOON_RESPONSE = 9,
        DM_MEM_HOT_ADD_REQUEST = 10,
        DM_MEM_HOT_ADD_RESPONSE = 11,
        DM_VERSION_03_MAX = 11,
        /*
         * Version 1.0.
         */
        DM_INFO_MESSAGE = 12,
        DM_VERSION_1_MAX = 12
};


/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
        struct {
                __u16 minor_version;
                __u16 major_version;
        };
        __u32 version;
} __packed;


union dm_caps {
        struct {
                __u64 balloon:1;
                __u64 hot_add:1;
                /*
                 * To support guests that may have alignment
                 * limitations on hot-add, the guest can specify
                 * its alignment requirements; a value of n
                 * represents an alignment of 2^n in megabytes.
                 */
                __u64 hot_add_alignment:4;
                __u64 reservedz:58;
        } cap_bits;
        __u64 caps;
} __packed;
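
/*
 * Worked example (illustrative only): hot_add_alignment encodes a power of
 * two, so a value of 7 advertises a 2^7 = 128 MB alignment requirement;
 * that is the value this driver reports in balloon_probe() below.
 */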

union dm_mem_page_range {
        struct  {
                /*
                 * The PFN of the first page in the range.
                 * 40 bits is the architectural limit of a PFN
                 * for AMD64.
                 */
                __u64 start_page:40;
                /*
                 * The number of pages in the range.
                 */
                __u64 page_cnt:24;
        } finfo;
        __u64  page_range;
} __packed;
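
/*
 * Worked example (illustrative values, assuming the little-endian bitfield
 * layout this driver relies on): start_page occupies bits 0-39 and page_cnt
 * bits 40-63, so a range starting at PFN 0x12345 that is 512 pages long
 * packs as:
 *
 *     page_range = ((__u64)512 << 40) | 0x12345;
 */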


/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
        __u16 type;
        __u16 size;
        __u32 trans_id;
} __packed;

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
        struct dm_header hdr;
        __u8 data[]; /* enclosed message */
} __packed;


/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
        struct dm_header hdr;
        union dm_version version;
        __u32 is_last_attempt:1;
        __u32 reservedz:31;
} __packed;

/*
 * Version response message; sent from the host to the guest and
 * indicates if the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
        struct dm_header hdr;
        union dm_caps caps;
        __u64 min_page_cnt;
        __u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies if the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 * in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 * calculated as File Cache Page Fault Count - Page Read Count.
 * This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
        struct dm_header hdr;
        __u64 num_avail;
        __u64 num_committed;
        __u64 page_file_size;
        __u64 zero_free;
        __u32 page_file_writes;
        __u32 io_diff;
} __packed;


/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
        struct dm_header hdr;
        __u32 num_pages;
        __u32 reservedz;
} __packed;


/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
        struct dm_header hdr;
        __u32 reservedz;
        __u32 more_pages:1;
        __u32 range_count:31;
        union dm_mem_page_range range_array[];
} __packed;
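
/*
 * Worked example (illustrative only): a response carrying two page ranges
 * occupies sizeof(struct dm_balloon_response) +
 * 2 * sizeof(union dm_mem_page_range) = 16 + 16 = 32 bytes; this is the
 * value balloon_up() below accumulates in hdr.size as it appends ranges.
 */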

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
        struct dm_header hdr;
        __u32 more_pages:1;
        __u32 reservedz:31;
        __u32 range_count;
        union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
        struct dm_header hdr;
} __packed;


/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * memory at arbitrary granularity.
 */

struct dm_hot_add {
        struct dm_header hdr;
        union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory
 * DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS times, it fails the request.
 *
 *
 * page_count: Number of pages that were successfully hot added.
 *
 * result: Result of the operation; 1: success, 0: failure.
 *
 */

struct dm_hot_add_response {
        struct dm_header hdr;
        __u32 page_count;
        __u32 result;
} __packed;

/*
 * Types of information sent from host to the guest.
 */

enum dm_info_type {
        INFO_TYPE_MAX_PAGE_CNT = 0,
        MAX_INFO_TYPE
};


/*
 * Header for the information message.
 */

struct dm_info_header {
        enum dm_info_type type;
        __u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
        struct dm_header hdr;
        __u32 reserved;
        __u32 info_size;
        __u8  info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long covered_start_pfn;
        unsigned long covered_end_pfn;
        unsigned long ha_end_pfn;
        unsigned long end_pfn;
        /*
         * A list of gaps.
         */
        struct list_head gap_list;
};

struct hv_hotadd_gap {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long end_pfn;
};
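
/*
 * Illustrative example (hypothetical PFNs): for a hot-add region with
 * start_pfn = 0x100000 and end_pfn = 0x120000 in which the host backed
 * pages 0x100000-0x107fff and 0x110000-0x117fff, covered_end_pfn would be
 * 0x118000 and gap_list would hold one hv_hotadd_gap spanning
 * 0x108000-0x110000; has_pfn_is_backed() below consults exactly this
 * bookkeeping.
 */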

struct balloon_state {
        __u32 num_pages;
        struct work_struct wrk;
};

struct hot_add_wrk {
        union dm_mem_page_range ha_page_range;
        union dm_mem_page_range ha_region_range;
        struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_dm_state {
        DM_INITIALIZING = 0,
        DM_INITIALIZED,
        DM_BALLOON_UP,
        DM_BALLOON_DOWN,
        DM_HOT_ADD,
        DM_INIT_ERROR
};


static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M 512
#define HA_CHUNK (32 * 1024)
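
/*
 * Worked arithmetic (assuming the 4 KiB PAGE_SIZE used on x86):
 * PAGES_IN_2M = 512 pages * 4 KiB = 2 MiB, the granularity in which the
 * host balloons, and HA_CHUNK = 32768 pages * 4 KiB = 128 MiB, matching
 * the 128M hot-add chunk size noted above and the 2^7 MB alignment
 * advertised in balloon_probe().
 */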

struct hv_dynmem_device {
        struct hv_device *dev;
        enum hv_dm_state state;
        struct completion host_event;
        struct completion config_event;

        /*
         * Number of pages we have currently ballooned out.
         */
        unsigned int num_pages_ballooned;
        unsigned int num_pages_onlined;
        unsigned int num_pages_added;

        /*
         * State to manage the ballooning (up) operation.
         */
        struct balloon_state balloon_wrk;

        /*
         * State to execute the "hot-add" operation.
         */
        struct hot_add_wrk ha_wrk;

        /*
         * This state tracks if the host has specified a hot-add
         * region.
         */
        bool host_specified_ha_region;

        /*
         * State to synchronize hot-add.
         */
        struct completion ol_waitevent;
        bool ha_waiting;
        /*
         * This thread handles hot-add
         * requests from the host as well as notifying
         * the host with regards to memory pressure in
         * the guest.
         */
        struct task_struct *thread;

        /*
         * Protects ha_region_list, num_pages_onlined counter and individual
         * regions from ha_region_list.
         */
        spinlock_t ha_lock;

        /*
         * A list of hot-add regions.
         */
        struct list_head ha_region_list;

        /*
         * We start with the highest version we can support
         * and downgrade based on the host; we save here the
         * next version to try.
         */
        __u32 next_version;

        /*
         * The negotiated version agreed by host.
         */
        __u32 version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG
static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
                                     unsigned long pfn)
{
        struct hv_hotadd_gap *gap;

        /* The page is not backed. */
        if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
                return false;

        /* Check for gaps. */
        list_for_each_entry(gap, &has->gap_list, list) {
                if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
                        return false;
        }

        return true;
}

static unsigned long hv_page_offline_check(unsigned long start_pfn,
                                           unsigned long nr_pages)
{
        unsigned long pfn = start_pfn, count = 0;
        struct hv_hotadd_state *has;
        bool found;

        while (pfn < start_pfn + nr_pages) {
                /*
                 * Search for a HAS which covers the pfn and, when we find
                 * one, count how many consecutive PFNs are covered.
                 */
                found = false;
                list_for_each_entry(has, &dm_device.ha_region_list, list) {
                        while ((pfn >= has->start_pfn) &&
                               (pfn < has->end_pfn) &&
                               (pfn < start_pfn + nr_pages)) {
                                found = true;
                                if (has_pfn_is_backed(has, pfn))
                                        count++;
                                pfn++;
                        }
                }

                /*
                 * This PFN is not in any HAS (e.g. we're offlining a region
                 * which was present at boot), no need to account for it. Go
                 * to the next one.
                 */
                if (!found)
                        pfn++;
        }

        return count;
}

static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
                              void *v)
{
        struct memory_notify *mem = (struct memory_notify *)v;
        unsigned long flags, pfn_count;

        switch (val) {
        case MEM_ONLINE:
        case MEM_CANCEL_ONLINE:
                if (dm_device.ha_waiting) {
                        dm_device.ha_waiting = false;
                        complete(&dm_device.ol_waitevent);
                }
                break;

        case MEM_OFFLINE:
                spin_lock_irqsave(&dm_device.ha_lock, flags);
                pfn_count = hv_page_offline_check(mem->start_pfn,
                                                  mem->nr_pages);
                if (pfn_count <= dm_device.num_pages_onlined) {
                        dm_device.num_pages_onlined -= pfn_count;
                } else {
                        /*
                         * We're offlining more pages than we managed to
                         * online. This is unexpected. In any case don't let
                         * num_pages_onlined wrap around zero.
                         */
                        WARN_ON_ONCE(1);
                        dm_device.num_pages_onlined = 0;
                }
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                break;
        case MEM_GOING_ONLINE:
        case MEM_GOING_OFFLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
        .notifier_call = hv_memory_notifier,
        .priority = 0
};

/* Check if the particular page is backed and can be onlined and online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
        if (!has_pfn_is_backed(has, page_to_pfn(pg)))
                return;

        /* This frame is currently backed; online the page. */
        __online_page_set_limits(pg);
        __online_page_increment_counters(pg);
        __online_page_free(pg);

        WARN_ON_ONCE(!spin_is_locked(&dm_device.ha_lock));
        dm_device.num_pages_onlined++;
}

static void hv_bring_pgs_online(struct hv_hotadd_state *has,
                                unsigned long start_pfn, unsigned long size)
{
        int i;

        pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
        for (i = 0; i < size; i++)
                hv_page_online_one(has, pfn_to_page(start_pfn + i));
}

static void hv_mem_hot_add(unsigned long start, unsigned long size,
                           unsigned long pfn_count,
                           struct hv_hotadd_state *has)
{
        int ret = 0;
        int i, nid;
        unsigned long start_pfn;
        unsigned long processed_pfn;
        unsigned long total_pfn = pfn_count;
        unsigned long flags;

        for (i = 0; i < (size/HA_CHUNK); i++) {
                start_pfn = start + (i * HA_CHUNK);

                spin_lock_irqsave(&dm_device.ha_lock, flags);
                has->ha_end_pfn += HA_CHUNK;

                if (total_pfn > HA_CHUNK) {
                        processed_pfn = HA_CHUNK;
                        total_pfn -= HA_CHUNK;
                } else {
                        processed_pfn = total_pfn;
                        total_pfn = 0;
                }

                has->covered_end_pfn += processed_pfn;
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);

                init_completion(&dm_device.ol_waitevent);
                dm_device.ha_waiting = !memhp_auto_online;

                nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
                ret = add_memory(nid, PFN_PHYS((start_pfn)),
                                 (HA_CHUNK << PAGE_SHIFT));

                if (ret) {
                        pr_err("hot_add memory failed error is %d\n", ret);
                        if (ret == -EEXIST) {
                                /*
                                 * This error indicates that the failure
                                 * is not transient. This is the
                                 * case where the guest's physical address map
                                 * precludes hot adding memory. Stop all further
                                 * memory hot-add.
                                 */
                                do_hot_add = false;
                        }
                        spin_lock_irqsave(&dm_device.ha_lock, flags);
                        has->ha_end_pfn -= HA_CHUNK;
                        has->covered_end_pfn -= processed_pfn;
                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                        break;
                }

                /*
                 * Wait for the memory block to be onlined when memory onlining
                 * is done outside of kernel (memhp_auto_online). Since the hot
                 * add has succeeded, it is ok to proceed even if the pages in
                 * the hot added region have not been "onlined" within the
                 * allowed time.
                 */
                if (dm_device.ha_waiting)
                        wait_for_completion_timeout(&dm_device.ol_waitevent,
                                                    5*HZ);
                post_status(&dm_device);
        }
}

static void hv_online_page(struct page *pg)
{
        struct hv_hotadd_state *has;
        unsigned long flags;
        unsigned long pfn = page_to_pfn(pg);

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /* The page belongs to a different HAS. */
                if ((pfn < has->start_pfn) || (pfn >= has->end_pfn))
                        continue;

                hv_page_online_one(has, pg);
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}

static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
        struct hv_hotadd_state *has;
        struct hv_hotadd_gap *gap;
        unsigned long residual, new_inc;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
                        continue;

                /*
                 * If the current start pfn is not where the covered_end
                 * is, create a gap and update covered_end_pfn.
                 */
                if (has->covered_end_pfn != start_pfn) {
                        gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
                        if (!gap) {
                                ret = -ENOMEM;
                                break;
                        }

                        INIT_LIST_HEAD(&gap->list);
                        gap->start_pfn = has->covered_end_pfn;
                        gap->end_pfn = start_pfn;
                        list_add_tail(&gap->list, &has->gap_list);

                        has->covered_end_pfn = start_pfn;
                }

                /*
                 * If the current hot-add request extends beyond
                 * our current limit, extend it.
                 */
                if ((start_pfn + pfn_cnt) > has->end_pfn) {
                        residual = (start_pfn + pfn_cnt - has->end_pfn);
                        /*
                         * Extend the region by multiples of HA_CHUNK.
                         */
                        new_inc = (residual / HA_CHUNK) * HA_CHUNK;
                        if (residual % HA_CHUNK)
                                new_inc += HA_CHUNK;

                        has->end_pfn += new_inc;
                }

                ret = 1;
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return ret;
}

static unsigned long handle_pg_range(unsigned long pg_start,
                                     unsigned long pg_count)
{
        unsigned long start_pfn = pg_start;
        unsigned long pfn_cnt = pg_count;
        unsigned long size;
        struct hv_hotadd_state *has;
        unsigned long pgs_ol = 0;
        unsigned long old_covered_state;
        unsigned long res = 0, flags;

        pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
                 pg_start);

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
                        continue;

                old_covered_state = has->covered_end_pfn;

                if (start_pfn < has->ha_end_pfn) {
                        /*
                         * This is the case where we are backing pages
                         * in an already hot added region. Bring
                         * these pages online first.
                         */
                        pgs_ol = has->ha_end_pfn - start_pfn;
                        if (pgs_ol > pfn_cnt)
                                pgs_ol = pfn_cnt;

                        has->covered_end_pfn += pgs_ol;
                        pfn_cnt -= pgs_ol;
                        /*
                         * Check if the corresponding memory block is already
                         * online by checking its last previously backed page.
                         * In case it is, we need to bring the rest (which was
                         * not backed previously) online too.
                         */
                        if (start_pfn > has->start_pfn &&
                            !PageReserved(pfn_to_page(start_pfn - 1)))
                                hv_bring_pgs_online(has, start_pfn, pgs_ol);

                }

                if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
                        /*
                         * We have some residual hot add range
                         * that needs to be hot added; hot add
                         * it now. Hot add a multiple of
                         * HA_CHUNK that fully covers the pages
                         * we have.
                         */
                        size = (has->end_pfn - has->ha_end_pfn);
                        if (pfn_cnt <= size) {
                                size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
                                if (pfn_cnt % HA_CHUNK)
                                        size += HA_CHUNK;
                        } else {
                                pfn_cnt = size;
                        }
                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                        hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
                        spin_lock_irqsave(&dm_device.ha_lock, flags);
                }
                /*
                 * If we managed to online any pages that were given to us,
                 * we declare success.
                 */
                res = has->covered_end_pfn - old_covered_state;
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return res;
}

static unsigned long process_hot_add(unsigned long pg_start,
                                     unsigned long pfn_cnt,
                                     unsigned long rg_start,
                                     unsigned long rg_size)
{
        struct hv_hotadd_state *ha_region = NULL;
        int covered;
        unsigned long flags;

        if (pfn_cnt == 0)
                return 0;

        if (!dm_device.host_specified_ha_region) {
                covered = pfn_covered(pg_start, pfn_cnt);
                if (covered < 0)
                        return 0;

                if (covered)
                        goto do_pg_range;
        }

        /*
         * If the host has specified a hot-add range, deal with it first.
         */

        if (rg_size != 0) {
                ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
                if (!ha_region)
                        return 0;

                INIT_LIST_HEAD(&ha_region->list);
                INIT_LIST_HEAD(&ha_region->gap_list);

                ha_region->start_pfn = rg_start;
                ha_region->ha_end_pfn = rg_start;
                ha_region->covered_start_pfn = pg_start;
                ha_region->covered_end_pfn = pg_start;
                ha_region->end_pfn = rg_start + rg_size;

                spin_lock_irqsave(&dm_device.ha_lock, flags);
                list_add_tail(&ha_region->list, &dm_device.ha_region_list);
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
        }

do_pg_range:
        /*
         * Process the specified page range, bringing the pages
         * online if possible.
         */
        return handle_pg_range(pg_start, pfn_cnt);
}

#endif

static void hot_add_req(struct work_struct *dummy)
{
        struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
        unsigned long pg_start, pfn_cnt;
        unsigned long rg_start, rg_sz;
#endif
        struct hv_dynmem_device *dm = &dm_device;

        memset(&resp, 0, sizeof(struct dm_hot_add_response));
        resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
        resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
        pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
        pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

        rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
        rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

        if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
                unsigned long region_size;
                unsigned long region_start;

                /*
                 * The host has not specified the hot-add region.
                 * Based on the hot-add page range being specified,
                 * compute a hot-add region that can cover the pages
                 * that need to be hot-added while ensuring the alignment
                 * and size requirements of Linux as it relates to hot-add.
                 */
                region_start = pg_start;
                region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
                if (pfn_cnt % HA_CHUNK)
                        region_size += HA_CHUNK;

                region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

                rg_start = region_start;
                rg_sz = region_size;
        }

        if (do_hot_add)
                resp.page_count = process_hot_add(pg_start, pfn_cnt,
                                                  rg_start, rg_sz);

        dm->num_pages_added += resp.page_count;
#endif
        /*
         * The result field of the response structure has the
         * following semantics:
         *
         * 1. If all or some pages hot-added: Guest should return success.
         *
         * 2. If no pages could be hot-added:
         *
         * If the guest returns success, then the host
         * will not attempt any further hot-add operations. This
         * signifies a permanent failure.
         *
         * If the guest returns failure, then this failure will be
         * treated as a transient failure and the host may retry the
         * hot-add operation after some delay.
         */
        if (resp.page_count > 0)
                resp.result = 1;
        else if (!do_hot_add)
                resp.result = 1;
        else
                resp.result = 0;

        if (!do_hot_add || (resp.page_count == 0))
                pr_err("Memory hot add failed\n");

        dm->state = DM_INITIALIZED;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        vmbus_sendpacket(dm->dev->channel, &resp,
                         sizeof(struct dm_hot_add_response),
                         (unsigned long)NULL,
                         VM_PKT_DATA_INBAND, 0);
}

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
        struct dm_info_header *info_hdr;

        info_hdr = (struct dm_info_header *)msg->info;

        switch (info_hdr->type) {
        case INFO_TYPE_MAX_PAGE_CNT:
                if (info_hdr->data_size == sizeof(__u64)) {
                        __u64 *max_page_count = (__u64 *)&info_hdr[1];

                        pr_info("Max. dynamic memory size: %llu MB\n",
                                (*max_page_count) >> (20 - PAGE_SHIFT));
                }

                break;
        default:
                pr_warn("Received Unknown type: %d\n", info_hdr->type);
        }
}
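
/*
 * Worked arithmetic for the conversion above (assuming PAGE_SHIFT == 12,
 * i.e. 4 KiB pages): the shift by 20 - PAGE_SHIFT = 8 divides the page
 * count by the 256 pages that make up one MiB, so a max_page_count of
 * 1048576 pages prints as 4096 MB.
 */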

static unsigned long compute_balloon_floor(void)
{
        unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
        /* Simple continuous piecewise linear function:
         *  max MiB -> min MiB  gradient
         *       0         0
         *      16        16
         *      32        24
         *     128        72    (1/2)
         *     512       168    (1/4)
         *    2048       360    (1/8)
         *    8192       744    (1/16)
         *   32768      1512    (1/32)
         */
        if (totalram_pages < MB2PAGES(128))
                min_pages = MB2PAGES(8) + (totalram_pages >> 1);
        else if (totalram_pages < MB2PAGES(512))
                min_pages = MB2PAGES(40) + (totalram_pages >> 2);
        else if (totalram_pages < MB2PAGES(2048))
                min_pages = MB2PAGES(104) + (totalram_pages >> 3);
        else if (totalram_pages < MB2PAGES(8192))
                min_pages = MB2PAGES(232) + (totalram_pages >> 4);
        else
                min_pages = MB2PAGES(488) + (totalram_pages >> 5);
#undef MB2PAGES
        return min_pages;
}
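
/*
 * Worked example (illustrative only): for a guest with 2048 MiB of RAM,
 * totalram_pages equals MB2PAGES(2048), so the fourth branch applies and
 * the floor is MB2PAGES(232) + MB2PAGES(2048)/16 = 232 + 128 = 360 MiB,
 * matching the 2048 -> 360 row of the table above.
 */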

/*
 * Post our status as it relates to memory pressure to the
 * host. The host expects the guest to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */

static void post_status(struct hv_dynmem_device *dm)
{
        struct dm_status status;
        unsigned long now = jiffies;
        unsigned long last_post = last_post_time;

        if (pressure_report_delay > 0) {
                --pressure_report_delay;
                return;
        }

        if (!time_after(now, (last_post_time + HZ)))
                return;

        memset(&status, 0, sizeof(struct dm_status));
        status.hdr.type = DM_STATUS_REPORT;
        status.hdr.size = sizeof(struct dm_status);
        status.hdr.trans_id = atomic_inc_return(&trans_id);

        /*
         * The host expects the guest to report free and committed memory.
         * Furthermore, the host expects the pressure information to include
         * the ballooned out pages. For a given amount of memory that we are
         * managing we need to compute a floor below which we should not
         * balloon. Compute this and add it to the pressure report.
         * We also need to report all offline pages (num_pages_added -
         * num_pages_onlined) as committed to the host, otherwise it can try
         * asking us to balloon them out.
         */
        status.num_avail = si_mem_available();
        status.num_committed = vm_memory_committed() +
                dm->num_pages_ballooned +
                (dm->num_pages_added > dm->num_pages_onlined ?
                 dm->num_pages_added - dm->num_pages_onlined : 0) +
                compute_balloon_floor();

        trace_balloon_status(status.num_avail, status.num_committed,
                             vm_memory_committed(), dm->num_pages_ballooned,
                             dm->num_pages_added, dm->num_pages_onlined);
        /*
         * If our transaction ID is no longer current, just don't
         * send the status. This can happen if we were interrupted
         * after we picked our transaction ID.
         */
        if (status.hdr.trans_id != atomic_read(&trans_id))
                return;

        /*
         * If the last post time that we sampled has changed,
         * we have raced, don't post the status.
         */
        if (last_post != last_post_time)
                return;

        last_post_time = jiffies;
        vmbus_sendpacket(dm->dev->channel, &status,
                         sizeof(struct dm_status),
                         (unsigned long)NULL,
                         VM_PKT_DATA_INBAND, 0);

}

static void free_balloon_pages(struct hv_dynmem_device *dm,
                               union dm_mem_page_range *range_array)
{
        int num_pages = range_array->finfo.page_cnt;
        __u64 start_frame = range_array->finfo.start_page;
        struct page *pg;
        int i;

        for (i = 0; i < num_pages; i++) {
                pg = pfn_to_page(i + start_frame);
                __free_page(pg);
                dm->num_pages_ballooned--;
        }
}



static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
                                        unsigned int num_pages,
                                        struct dm_balloon_response *bl_resp,
                                        int alloc_unit)
{
        unsigned int i = 0;
        struct page *pg;

        if (num_pages < alloc_unit)
                return 0;

        for (i = 0; (i * alloc_unit) < num_pages; i++) {
                if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
                    PAGE_SIZE)
                        return i * alloc_unit;

                /*
                 * We execute this code in a thread context. Furthermore,
                 * we don't want the kernel to try too hard.
                 */
                pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
                                 __GFP_NOMEMALLOC | __GFP_NOWARN,
                                 get_order(alloc_unit << PAGE_SHIFT));

                if (!pg)
                        return i * alloc_unit;

                dm->num_pages_ballooned += alloc_unit;

                /*
                 * If we allocated 2M pages, split them so we
                 * can free them in any order we get.
                 */

                if (alloc_unit != 1)
                        split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

                bl_resp->range_count++;
                bl_resp->range_array[i].finfo.start_page =
                        page_to_pfn(pg);
                bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
                bl_resp->hdr.size += sizeof(union dm_mem_page_range);

        }

        return num_pages;
}
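
/*
 * Worked arithmetic for the size check above (assuming 4 KiB pages and
 * the packed layouts defined earlier): hdr.size starts at
 * sizeof(struct dm_balloon_response) = 16 bytes and grows by
 * sizeof(union dm_mem_page_range) = 8 bytes per range, so a single
 * PAGE_SIZE response can carry at most (4096 - 16) / 8 = 510 ranges
 * before the loop bails out.
 */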

static void balloon_up(struct work_struct *dummy)
{
        unsigned int num_pages = dm_device.balloon_wrk.num_pages;
        unsigned int num_ballooned = 0;
        struct dm_balloon_response *bl_resp;
        int alloc_unit;
        int ret;
        bool done = false;
        int i;
        long avail_pages;
        unsigned long floor;

        /* The host balloons pages in 2M granularity. */
        WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

        /*
         * We will attempt 2M allocations. However, if we fail to
         * allocate 2M chunks, we will go back to 4k allocations.
         */
        alloc_unit = 512;

        avail_pages = si_mem_available();
        floor = compute_balloon_floor();

        /* Refuse to balloon below the floor, keep the 2M granularity. */
        if (avail_pages < num_pages || avail_pages - num_pages < floor) {
                pr_warn("Balloon request will be partially fulfilled. %s\n",
                        avail_pages < num_pages ? "Not enough memory." :
                        "Balloon floor reached.");

                num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
                num_pages -= num_pages % PAGES_IN_2M;
        }

        while (!done) {
                bl_resp = (struct dm_balloon_response *)send_buffer;
                memset(send_buffer, 0, PAGE_SIZE);
                bl_resp->hdr.type = DM_BALLOON_RESPONSE;
                bl_resp->hdr.size = sizeof(struct dm_balloon_response);
                bl_resp->more_pages = 1;

                num_pages -= num_ballooned;
                num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
                                                    bl_resp, alloc_unit);

                if (alloc_unit != 1 && num_ballooned == 0) {
                        alloc_unit = 1;
                        continue;
                }

                if (num_ballooned == 0 || num_ballooned == num_pages) {
                        pr_debug("Ballooned %u out of %u requested pages.\n",
                                 num_pages, dm_device.balloon_wrk.num_pages);

                        bl_resp->more_pages = 0;
                        done = true;
                        dm_device.state = DM_INITIALIZED;
                }

                /*
                 * We are pushing a lot of data through the channel;
                 * deal with transient failures caused by the lack of
                 * space in the ring buffer.
                 */

                do {
                        bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
                        ret = vmbus_sendpacket(dm_device.dev->channel,
                                               bl_resp,
                                               bl_resp->hdr.size,
                                               (unsigned long)NULL,
                                               VM_PKT_DATA_INBAND, 0);

                        if (ret == -EAGAIN)
                                msleep(20);
                        post_status(&dm_device);
                } while (ret == -EAGAIN);

                if (ret) {
                        /*
                         * Free up the memory we allocated.
                         */
                        pr_err("Balloon response failed\n");

                        for (i = 0; i < bl_resp->range_count; i++)
                                free_balloon_pages(&dm_device,
                                                   &bl_resp->range_array[i]);

                        done = true;
                }
        }

}

static void balloon_down(struct hv_dynmem_device *dm,
                         struct dm_unballoon_request *req)
{
        union dm_mem_page_range *range_array = req->range_array;
        int range_count = req->range_count;
        struct dm_unballoon_response resp;
        int i;
        unsigned int prev_pages_ballooned = dm->num_pages_ballooned;

        for (i = 0; i < range_count; i++) {
                free_balloon_pages(dm, &range_array[i]);
                complete(&dm_device.config_event);
        }

        pr_debug("Freed %u ballooned pages.\n",
                 prev_pages_ballooned - dm->num_pages_ballooned);

        if (req->more_pages == 1)
                return;

        memset(&resp, 0, sizeof(struct dm_unballoon_response));
        resp.hdr.type = DM_UNBALLOON_RESPONSE;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        resp.hdr.size = sizeof(struct dm_unballoon_response);

        vmbus_sendpacket(dm_device.dev->channel, &resp,
                         sizeof(struct dm_unballoon_response),
                         (unsigned long)NULL,
                         VM_PKT_DATA_INBAND, 0);

        dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
        struct hv_dynmem_device *dm = dm_dev;

        while (!kthread_should_stop()) {
                wait_for_completion_interruptible_timeout(
                                                &dm_device.config_event, 1*HZ);
                /*
                 * The host expects us to post information on the memory
                 * pressure every second.
                 */
                reinit_completion(&dm_device.config_event);
                post_status(dm);
        }

        return 0;
}


static void version_resp(struct hv_dynmem_device *dm,
                         struct dm_version_response *vresp)
{
        struct dm_version_request version_req;
        int ret;

        if (vresp->is_accepted) {
                /*
                 * We are done; wake up the
                 * context waiting for version
                 * negotiation.
                 */
                complete(&dm->host_event);
                return;
        }
        /*
         * If there are more versions to try, continue
         * with negotiations; if not,
         * shut down the service since we are not able
         * to negotiate a suitable version number
         * with the host.
         */
        if (dm->next_version == 0)
                goto version_error;

        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = dm->next_version;
        dm->version = version_req.version.version;

        /*
         * Set the next version to try in case the current version fails.
         * The Win7 protocol ought to be the last one to try.
         */
        switch (version_req.version.version) {
        case DYNMEM_PROTOCOL_VERSION_WIN8:
                dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
                version_req.is_last_attempt = 0;
                break;
        default:
                dm->next_version = 0;
                version_req.is_last_attempt = 1;
        }

        ret = vmbus_sendpacket(dm->dev->channel, &version_req,
                               sizeof(struct dm_version_request),
                               (unsigned long)NULL,
                               VM_PKT_DATA_INBAND, 0);

        if (ret)
                goto version_error;

        return;

version_error:
        dm->state = DM_INIT_ERROR;
        complete(&dm->host_event);
}
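
/*
 * Illustrative negotiation sequence against a Win7-era host:
 * balloon_probe() first requests version 2.0 (WIN10) with next_version set
 * to WIN8; on rejection this handler requests 1.0 (WIN8) and sets
 * next_version to WIN7; on a second rejection it requests 0.3 (WIN7) with
 * is_last_attempt = 1, and a third rejection lands in version_error.
 */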

static void cap_resp(struct hv_dynmem_device *dm,
                     struct dm_capabilities_resp_msg *cap_resp)
{
        if (!cap_resp->is_accepted) {
                pr_err("Capabilities not accepted by host\n");
                dm->state = DM_INIT_ERROR;
        }
        complete(&dm->host_event);
}

static void balloon_onchannelcallback(void *context)
{
        struct hv_device *dev = context;
        u32 recvlen;
        u64 requestid;
        struct dm_message *dm_msg;
        struct dm_header *dm_hdr;
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct dm_balloon *bal_msg;
        struct dm_hot_add *ha_msg;
        union dm_mem_page_range *ha_pg_range;
        union dm_mem_page_range *ha_region;

        memset(recv_buffer, 0, sizeof(recv_buffer));
        vmbus_recvpacket(dev->channel, recv_buffer,
                         PAGE_SIZE, &recvlen, &requestid);

        if (recvlen > 0) {
                dm_msg = (struct dm_message *)recv_buffer;
                dm_hdr = &dm_msg->hdr;

                switch (dm_hdr->type) {
                case DM_VERSION_RESPONSE:
                        version_resp(dm,
                                     (struct dm_version_response *)dm_msg);
                        break;

                case DM_CAPABILITIES_RESPONSE:
                        cap_resp(dm,
                                 (struct dm_capabilities_resp_msg *)dm_msg);
                        break;

                case DM_BALLOON_REQUEST:
                        if (dm->state == DM_BALLOON_UP)
                                pr_warn("Currently ballooning\n");
                        bal_msg = (struct dm_balloon *)recv_buffer;
                        dm->state = DM_BALLOON_UP;
                        dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
                        schedule_work(&dm_device.balloon_wrk.wrk);
                        break;

                case DM_UNBALLOON_REQUEST:
                        dm->state = DM_BALLOON_DOWN;
                        balloon_down(dm,
                                     (struct dm_unballoon_request *)recv_buffer);
                        break;

                case DM_MEM_HOT_ADD_REQUEST:
                        if (dm->state == DM_HOT_ADD)
                                pr_warn("Currently hot-adding\n");
                        dm->state = DM_HOT_ADD;
                        ha_msg = (struct dm_hot_add *)recv_buffer;
                        if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
                                /*
                                 * This is a normal hot-add request specifying
                                 * hot-add memory.
                                 */
                                dm->host_specified_ha_region = false;
                                ha_pg_range = &ha_msg->range;
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range.page_range = 0;
                        } else {
                                /*
                                 * Host is specifying that we first hot-add
                                 * a region and then partially populate this
                                 * region.
                                 */
                                dm->host_specified_ha_region = true;
                                ha_pg_range = &ha_msg->range;
                                ha_region = &ha_pg_range[1];
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range = *ha_region;
                        }
                        schedule_work(&dm_device.ha_wrk.wrk);
                        break;

                case DM_INFO_MESSAGE:
                        process_info(dm, (struct dm_info_msg *)dm_msg);
                        break;

                default:
                        pr_warn("Unhandled message: type: %d\n", dm_hdr->type);

                }
        }

}

static int balloon_probe(struct hv_device *dev,
                         const struct hv_vmbus_device_id *dev_id)
{
        int ret;
        unsigned long t;
        struct dm_version_request version_req;
        struct dm_capabilities cap_msg;

#ifdef CONFIG_MEMORY_HOTPLUG
        do_hot_add = hot_add;
#else
        do_hot_add = false;
#endif

        /*
         * First allocate a send buffer.
         */

        send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!send_buffer)
                return -ENOMEM;

        ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
                         balloon_onchannelcallback, dev);

        if (ret)
                goto probe_error0;

        dm_device.dev = dev;
        dm_device.state = DM_INITIALIZING;
        dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
        init_completion(&dm_device.host_event);
        init_completion(&dm_device.config_event);
        INIT_LIST_HEAD(&dm_device.ha_region_list);
        spin_lock_init(&dm_device.ha_lock);
        INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
        INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
        dm_device.host_specified_ha_region = false;

        dm_device.thread =
                 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
        if (IS_ERR(dm_device.thread)) {
                ret = PTR_ERR(dm_device.thread);
                goto probe_error1;
        }

#ifdef CONFIG_MEMORY_HOTPLUG
        set_online_page_callback(&hv_online_page);
        register_memory_notifier(&hv_memory_nb);
#endif

        hv_set_drvdata(dev, &dm_device);
        /*
         * Initiate the handshake with the host and negotiate
         * a version that the host can support. We start with the
         * highest version number and go down if the host cannot
         * support it.
         */
        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
        version_req.is_last_attempt = 0;
        dm_device.version = version_req.version.version;

        ret = vmbus_sendpacket(dev->channel, &version_req,
                               sizeof(struct dm_version_request),
                               (unsigned long)NULL,
                               VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If we could not negotiate a compatible version with the host,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        pr_info("Using Dynamic Memory protocol version %u.%u\n",
                DYNMEM_MAJOR_VERSION(dm_device.version),
                DYNMEM_MINOR_VERSION(dm_device.version));

        /*
         * Now submit our capabilities to the host.
         */
        memset(&cap_msg, 0, sizeof(struct dm_capabilities));
        cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
        cap_msg.hdr.size = sizeof(struct dm_capabilities);
        cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

        cap_msg.caps.cap_bits.balloon = 1;
        cap_msg.caps.cap_bits.hot_add = 1;

        /*
         * Specify our alignment requirements as they relate to
         * memory hot-add. Specify 128MB alignment.
         */
        cap_msg.caps.cap_bits.hot_add_alignment = 7;

        /*
         * Currently the host does not use these
         * values and we set them to what is done in the
         * Windows driver.
         */
        cap_msg.min_page_cnt = 0;
        cap_msg.max_page_number = -1;

        ret = vmbus_sendpacket(dev->channel, &cap_msg,
                               sizeof(struct dm_capabilities),
                               (unsigned long)NULL,
                               VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If the host does not like our capabilities,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        dm_device.state = DM_INITIALIZED;
        last_post_time = jiffies;

        return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
#endif
        kthread_stop(dm_device.thread);

probe_error1:
        vmbus_close(dev->channel);
probe_error0:
        kfree(send_buffer);
        return ret;
}

static int balloon_remove(struct hv_device *dev)
{
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct hv_hotadd_state *has, *tmp;
        struct hv_hotadd_gap *gap, *tmp_gap;
        unsigned long flags;

        if (dm->num_pages_ballooned != 0)
                pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

        cancel_work_sync(&dm->balloon_wrk.wrk);
        cancel_work_sync(&dm->ha_wrk.wrk);

        vmbus_close(dev->channel);
        kthread_stop(dm->thread);
        kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
        unregister_memory_notifier(&hv_memory_nb);
#endif
        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
                list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
                        list_del(&gap->list);
                        kfree(gap);
                }
                list_del(&has->list);
                kfree(has);
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
        /* Dynamic Memory Class ID */
        /* 525074DC-8985-46e2-8057-A307DC18A502 */
        { HV_DM_GUID, },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
        .name = "hv_balloon",
        .id_table = id_table,
        .probe =  balloon_probe,
        .remove =  balloon_remove,
        .driver = {
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
};

static int __init init_balloon_drv(void)
{

        return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");