// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>
#include <linux/page_reporting.h>

#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>

#include <asm/mshyperv.h>

#define CREATE_TRACE_POINTS
#include "hv_trace_balloon.h"
/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */
/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changes to 0.2 on 2009/05/14
 * Changes to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
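/*
 * For illustration: DYNMEM_MAKE_VERSION(2, 0) encodes as 0x00020000, so
 * DYNMEM_MAJOR_VERSION() recovers 2 and DYNMEM_MINOR_VERSION() recovers 0
 * from that value.
 */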
enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};
/*
 * Message Types
 */

enum dm_message_type {
	/*
	 * Version 0.3
	 */
	DM_ERROR			= 0,
	DM_VERSION_REQUEST		= 1,
	DM_VERSION_RESPONSE		= 2,
	DM_CAPABILITIES_REPORT		= 3,
	DM_CAPABILITIES_RESPONSE	= 4,
	DM_STATUS_REPORT		= 5,
	DM_BALLOON_REQUEST		= 6,
	DM_BALLOON_RESPONSE		= 7,
	DM_UNBALLOON_REQUEST		= 8,
	DM_UNBALLOON_RESPONSE		= 9,
	DM_MEM_HOT_ADD_REQUEST		= 10,
	DM_MEM_HOT_ADD_RESPONSE		= 11,
	DM_VERSION_03_MAX		= 11,
	/*
	 * Version 1.0.
	 */
	DM_INFO_MESSAGE			= 12,
	DM_VERSION_1_MAX		= 12
};
/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
	struct {
		__u16 minor_version;
		__u16 major_version;
	};
	__u32 version;
} __packed;

union dm_caps {
	struct {
		__u64 balloon:1;
		__u64 hot_add:1;
		/*
		 * To support guests that may have alignment
		 * limitations on hot-add, the guest can specify
		 * its alignment requirements; a value of n
		 * represents an alignment of 2^n in mega bytes.
		 */
		__u64 hot_add_alignment:4;
		__u64 reservedz:58;
	} cap_bits;
	__u64 caps;
} __packed;
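/*
 * For example, hot_add_alignment == 7 requests 2^7 == 128 MB alignment,
 * which is the value this driver reports in balloon_connect_vsp() below.
 */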
union dm_mem_page_range {
	struct {
		/*
		 * The PFN number of the first page in the range.
		 * 40 bits is the architectural limit of a PFN
		 * number.
		 */
		__u64 start_page:40;
		/*
		 * The number of pages in the range.
		 */
		__u64 page_cnt:24;
	} finfo;
	__u64 page_range;
} __packed;
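/*
 * Capacity note: with a 24-bit page_cnt, a single range can describe up to
 * 2^24 pages (64 GB with 4 KiB pages) anywhere in the 40-bit PFN space.
 */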
/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes; including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;
/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
	struct dm_header hdr;
	__u8 data[]; /* enclosed message */
} __packed;
/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
	__u32 reservedz:31;
} __packed;
/*
 * Version response message; Host to Guest and indicates
 * if the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * guest should re-try with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;
/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;
/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies if the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shutdown
 * the service.
 *
 * is_accepted: Indicates if the host has accepted guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;
/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 *		   in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 *	    calculated as File Cache Page Fault Count - Page Read Count.
 *	    This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;
/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;
/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 */

struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;
/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 */

struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;
/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 */

struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;
/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 */

struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;
/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times it fails the request.
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation 1: success, 0: failure.
 */

struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 page_count;
	__u32 result;
} __packed;
/*
 * Types of information sent from host to the guest.
 */

enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};
/*
 * Header for the information message.
 */

struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;
/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8  info[];
};
/*
 * End protocol definitions.
 */
/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 */
struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
	/*
	 * A list of gaps.
	 */
	struct list_head gap_list;
};

struct hv_hotadd_gap {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};
struct balloon_state {
	__u32 num_pages;
	struct work_struct wrk;
};

struct hot_add_wrk {
	union dm_mem_page_range ha_page_range;
	union dm_mem_page_range ha_region_range;
	struct work_struct wrk;
};
static bool allow_hibernation;
static bool hot_add = true;
static bool do_hot_add;

/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");

static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = VMBUS_RING_SIZE(16 * 1024);
/*
 * Driver specific state.
 */

enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};

static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];

#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
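/*
 * With the common 4 KiB PAGE_SIZE, PAGES_IN_2M is 512 pages and HA_CHUNK is
 * 32768 pages, i.e. memory is hot added in 128 MB chunks.
 */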
struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;
	unsigned int num_pages_onlined;
	unsigned int num_pages_added;

	/*
	 * State to manage the ballooning (up) operation.
	 */
	struct balloon_state balloon_wrk;

	/*
	 * State to execute the "hot-add" operation.
	 */
	struct hot_add_wrk ha_wrk;

	/*
	 * This state tracks if the host has specified a hot-add
	 * region.
	 */
	bool host_specified_ha_region;

	/*
	 * State to synchronize hot-add.
	 */
	struct completion ol_waitevent;

	/*
	 * This thread handles hot-add
	 * requests from the host as well as notifying
	 * the host with regards to memory pressure in
	 * the guest.
	 */
	struct task_struct *thread;

	/*
	 * Protects ha_region_list, num_pages_onlined counter and individual
	 * regions from ha_region_list.
	 */
	spinlock_t ha_lock;

	/*
	 * A list of hot-add regions.
	 */
	struct list_head ha_region_list;

	/*
	 * We start with the highest version we can support
	 * and downgrade based on the host; we save here the
	 * next version to try.
	 */
	__u32 next_version;

	/*
	 * The negotiated version agreed by host.
	 */
	__u32 version;

	struct page_reporting_dev_info pr_dev_info;
};
static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);
#ifdef CONFIG_MEMORY_HOTPLUG
static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
				     unsigned long pfn)
{
	struct hv_hotadd_gap *gap;

	/* The page is not backed. */
	if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
		return false;

	/* Check for gaps. */
	list_for_each_entry(gap, &has->gap_list, list) {
		if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
			return false;
	}

	return true;
}
static unsigned long hv_page_offline_check(unsigned long start_pfn,
					   unsigned long nr_pages)
{
	unsigned long pfn = start_pfn, count = 0;
	struct hv_hotadd_state *has;
	bool found;

	while (pfn < start_pfn + nr_pages) {
		/*
		 * Search for HAS which covers the pfn and when we find one
		 * count how many consecutive PFNs are covered.
		 */
		found = false;
		list_for_each_entry(has, &dm_device.ha_region_list, list) {
			while ((pfn >= has->start_pfn) &&
			       (pfn < has->end_pfn) &&
			       (pfn < start_pfn + nr_pages)) {
				found = true;
				if (has_pfn_is_backed(has, pfn))
					count++;
				pfn++;
			}
		}

		/*
		 * This PFN is not in any HAS (e.g. we're offlining a region
		 * which was present at boot), no need to account for it. Go
		 * to the next one.
		 */
		if (!found)
			pfn++;
	}

	return count;
}
static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
			      void *v)
{
	struct memory_notify *mem = (struct memory_notify *)v;
	unsigned long flags, pfn_count;

	switch (val) {
	case MEM_ONLINE:
	case MEM_CANCEL_ONLINE:
		complete(&dm_device.ol_waitevent);
		break;

	case MEM_OFFLINE:
		spin_lock_irqsave(&dm_device.ha_lock, flags);
		pfn_count = hv_page_offline_check(mem->start_pfn,
						  mem->nr_pages);
		if (pfn_count <= dm_device.num_pages_onlined) {
			dm_device.num_pages_onlined -= pfn_count;
		} else {
			/*
			 * We're offlining more pages than we managed to online.
			 * This is unexpected. In any case don't let
			 * num_pages_onlined wrap around zero.
			 */
			WARN_ON_ONCE(1);
			dm_device.num_pages_onlined = 0;
		}
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
		break;
	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
	.notifier_call = hv_memory_notifier,
	.priority = 0
};
/* Check if the particular page is backed and can be onlined and online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
	if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
		if (!PageOffline(pg))
			__SetPageOffline(pg);
		return;
	}
	if (PageOffline(pg))
		__ClearPageOffline(pg);

	/* This frame is currently backed; online the page. */
	generic_online_page(pg, 0);

	lockdep_assert_held(&dm_device.ha_lock);
	dm_device.num_pages_onlined++;
}
static void hv_bring_pgs_online(struct hv_hotadd_state *has,
				unsigned long start_pfn, unsigned long size)
{
	int i;

	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
	for (i = 0; i < size; i++)
		hv_page_online_one(has, pfn_to_page(start_pfn + i));
}
static void hv_mem_hot_add(unsigned long start, unsigned long size,
			   unsigned long pfn_count,
			   struct hv_hotadd_state *has)
{
	int ret = 0;
	int i, nid;
	unsigned long start_pfn;
	unsigned long processed_pfn;
	unsigned long total_pfn = pfn_count;
	unsigned long flags;

	for (i = 0; i < (size/HA_CHUNK); i++) {
		start_pfn = start + (i * HA_CHUNK);

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		has->ha_end_pfn += HA_CHUNK;

		if (total_pfn > HA_CHUNK) {
			processed_pfn = HA_CHUNK;
			total_pfn -= HA_CHUNK;
		} else {
			processed_pfn = total_pfn;
			total_pfn = 0;
		}

		has->covered_end_pfn += processed_pfn;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);

		reinit_completion(&dm_device.ol_waitevent);

		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
		ret = add_memory(nid, PFN_PHYS((start_pfn)),
				 (HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE);

		if (ret) {
			pr_err("hot_add memory failed error is %d\n", ret);
			if (ret == -EEXIST) {
				/*
				 * This error indicates that the error
				 * is not a transient failure. This is the
				 * case where the guest's physical address map
				 * precludes hot adding memory. Stop all further
				 * memory hot-add.
				 */
				do_hot_add = false;
			}
			spin_lock_irqsave(&dm_device.ha_lock, flags);
			has->ha_end_pfn -= HA_CHUNK;
			has->covered_end_pfn -= processed_pfn;
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			break;
		}

		/*
		 * Wait for memory to get onlined. If the kernel onlined the
		 * memory when adding it, this will return directly. Otherwise,
		 * it will wait for user space to online the memory. This helps
		 * to avoid adding memory faster than it is getting onlined. As
		 * adding succeeded, it is ok to proceed even if the memory was
		 * not onlined in time.
		 */
		wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
		post_status(&dm_device);
	}
}
static void hv_online_page(struct page *pg, unsigned int order)
{
	struct hv_hotadd_state *has;
	unsigned long flags;
	unsigned long pfn = page_to_pfn(pg);

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/* The page belongs to a different HAS. */
		if ((pfn < has->start_pfn) ||
		    (pfn + (1UL << order) > has->end_pfn))
			continue;

		hv_bring_pgs_online(has, pfn, 1UL << order);
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}
static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
	struct hv_hotadd_state *has;
	struct hv_hotadd_gap *gap;
	unsigned long residual, new_inc;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		/*
		 * If the current start pfn is not where the covered_end
		 * is, create a gap and update covered_end_pfn.
		 */
		if (has->covered_end_pfn != start_pfn) {
			gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
			if (!gap) {
				ret = -ENOMEM;
				break;
			}

			INIT_LIST_HEAD(&gap->list);
			gap->start_pfn = has->covered_end_pfn;
			gap->end_pfn = start_pfn;
			list_add_tail(&gap->list, &has->gap_list);

			has->covered_end_pfn = start_pfn;
		}

		/*
		 * If the current hot add-request extends beyond
		 * our current limit; extend it.
		 */
		if ((start_pfn + pfn_cnt) > has->end_pfn) {
			residual = (start_pfn + pfn_cnt - has->end_pfn);
			/*
			 * Extend the region by multiples of HA_CHUNK.
			 */
			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
			if (residual % HA_CHUNK)
				new_inc += HA_CHUNK;

			has->end_pfn += new_inc;
		}

		ret = 1;
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return ret;
}
static unsigned long handle_pg_range(unsigned long pg_start,
				     unsigned long pg_count)
{
	unsigned long start_pfn = pg_start;
	unsigned long pfn_cnt = pg_count;
	unsigned long size;
	struct hv_hotadd_state *has;
	unsigned long pgs_ol = 0;
	unsigned long old_covered_state;
	unsigned long res = 0, flags;

	pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
		 pg_start);

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		old_covered_state = has->covered_end_pfn;

		if (start_pfn < has->ha_end_pfn) {
			/*
			 * This is the case where we are backing pages
			 * in an already hot added region. Bring
			 * these pages online first.
			 */
			pgs_ol = has->ha_end_pfn - start_pfn;
			if (pgs_ol > pfn_cnt)
				pgs_ol = pfn_cnt;

			has->covered_end_pfn += pgs_ol;
			pfn_cnt -= pgs_ol;
			/*
			 * Check if the corresponding memory block is already
			 * online. It is possible to observe struct pages still
			 * being uninitialized here so check section instead.
			 * In case the section is online we need to bring the
			 * rest of pfns (which were not backed previously)
			 * online too.
			 */
			if (start_pfn > has->start_pfn &&
			    online_section_nr(pfn_to_section_nr(start_pfn)))
				hv_bring_pgs_online(has, start_pfn, pgs_ol);
		}

		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
			/*
			 * We have some residual hot add range
			 * that needs to be hot added; hot add
			 * it now. Hot add a multiple of
			 * HA_CHUNK that fully covers the pages
			 * we have.
			 */
			size = (has->end_pfn - has->ha_end_pfn);
			if (pfn_cnt <= size) {
				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
				if (pfn_cnt % HA_CHUNK)
					size += HA_CHUNK;
			} else {
				pfn_cnt = size;
			}
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
			spin_lock_irqsave(&dm_device.ha_lock, flags);
		}
		/*
		 * If we managed to online any pages that were given to us,
		 * we declare success.
		 */
		res = has->covered_end_pfn - old_covered_state;
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return res;
}
static unsigned long process_hot_add(unsigned long pg_start,
				     unsigned long pfn_cnt,
				     unsigned long rg_start,
				     unsigned long rg_size)
{
	struct hv_hotadd_state *ha_region = NULL;
	int covered;
	unsigned long flags;

	if (pfn_cnt == 0)
		return 0;

	if (!dm_device.host_specified_ha_region) {
		covered = pfn_covered(pg_start, pfn_cnt);
		if (covered < 0)
			return 0;

		if (covered)
			goto do_pg_range;
	}

	/*
	 * If the host has specified a hot-add range; deal with it first.
	 */
	if (rg_size != 0) {
		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
		if (!ha_region)
			return 0;

		INIT_LIST_HEAD(&ha_region->list);
		INIT_LIST_HEAD(&ha_region->gap_list);

		ha_region->start_pfn = rg_start;
		ha_region->ha_end_pfn = rg_start;
		ha_region->covered_start_pfn = pg_start;
		ha_region->covered_end_pfn = pg_start;
		ha_region->end_pfn = rg_start + rg_size;

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
	}

do_pg_range:
	/*
	 * Process the page range specified; bringing them
	 * online if possible.
	 */
	return handle_pg_range(pg_start, pfn_cnt);
}

#endif
static void hot_add_req(struct work_struct *dummy)
{
	struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
	unsigned long pg_start, pfn_cnt;
	unsigned long rg_start, rg_sz;
#endif
	struct hv_dynmem_device *dm = &dm_device;

	memset(&resp, 0, sizeof(struct dm_hot_add_response));
	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
	resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
		unsigned long region_size;
		unsigned long region_start;

		/*
		 * The host has not specified the hot-add region.
		 * Based on the hot-add page range being specified,
		 * compute a hot-add region that can cover the pages
		 * that need to be hot-added while ensuring the alignment
		 * and size requirements of Linux as it relates to hot-add.
		 */
		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
		if (pfn_cnt % HA_CHUNK)
			region_size += HA_CHUNK;

		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

		rg_start = region_start;
		rg_sz = region_size;
	}

	if (do_hot_add)
		resp.page_count = process_hot_add(pg_start, pfn_cnt,
						  rg_start, rg_sz);

	dm->num_pages_added += resp.page_count;
#endif
	/*
	 * The result field of the response structure has the
	 * following semantics:
	 *
	 * 1. If all or some pages hot-added: Guest should return success.
	 *
	 * 2. If no pages could be hot-added:
	 *
	 * If the guest returns success, then the host
	 * will not attempt any further hot-add operations. This
	 * signifies a permanent failure.
	 *
	 * If the guest returns failure, then this failure will be
	 * treated as a transient failure and the host may retry the
	 * hot-add operation after some delay.
	 */
	if (resp.page_count > 0)
		resp.result = 1;
	else if (!do_hot_add)
		resp.result = 1;
	else
		resp.result = 0;

	if (!do_hot_add || resp.page_count == 0) {
		if (!allow_hibernation)
			pr_err("Memory hot add failed\n");
		else
			pr_info("Ignore hot-add request!\n");
	}

	dm->state = DM_INITIALIZED;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	vmbus_sendpacket(dm->dev->channel, &resp,
			 sizeof(struct dm_hot_add_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);
}
static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
	struct dm_info_header *info_hdr;

	info_hdr = (struct dm_info_header *)msg->info;

	switch (info_hdr->type) {
	case INFO_TYPE_MAX_PAGE_CNT:
		if (info_hdr->data_size == sizeof(__u64)) {
			__u64 *max_page_count = (__u64 *)&info_hdr[1];

			pr_info("Max. dynamic memory size: %llu MB\n",
				(*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
		}

		break;
	default:
		pr_warn("Received Unknown type: %d\n", info_hdr->type);
	}
}
static unsigned long compute_balloon_floor(void)
{
	unsigned long min_pages;
	unsigned long nr_pages = totalram_pages();
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/* Simple continuous piecewise linear function:
	 *  max MiB -> min MiB  gradient
	 *       0         0
	 *      16        16
	 *      32        24
	 *     128        72    (1/2)
	 *     512       168    (1/4)
	 *    2048       360    (1/8)
	 *    8192       744    (1/16)
	 *   32768      1512    (1/32)
	 */
	if (nr_pages < MB2PAGES(128))
		min_pages = MB2PAGES(8) + (nr_pages >> 1);
	else if (nr_pages < MB2PAGES(512))
		min_pages = MB2PAGES(40) + (nr_pages >> 2);
	else if (nr_pages < MB2PAGES(2048))
		min_pages = MB2PAGES(104) + (nr_pages >> 3);
	else if (nr_pages < MB2PAGES(8192))
		min_pages = MB2PAGES(232) + (nr_pages >> 4);
	else
		min_pages = MB2PAGES(488) + (nr_pages >> 5);
#undef MB2PAGES
	return min_pages;
}
/*
 * Post our status as it relates to memory pressure to the
 * host. Host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */
static void post_status(struct hv_dynmem_device *dm)
{
	struct dm_status status;
	unsigned long now = jiffies;
	unsigned long last_post = last_post_time;

	if (pressure_report_delay > 0) {
		--pressure_report_delay;
		return;
	}

	if (!time_after(now, (last_post_time + HZ)))
		return;

	memset(&status, 0, sizeof(struct dm_status));
	status.hdr.type = DM_STATUS_REPORT;
	status.hdr.size = sizeof(struct dm_status);
	status.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * The host expects the guest to report free and committed memory.
	 * Furthermore, the host expects the pressure information to include
	 * the ballooned out pages. For a given amount of memory that we are
	 * managing we need to compute a floor below which we should not
	 * balloon. Compute this and add it to the pressure report.
	 * We also need to report all offline pages (num_pages_added -
	 * num_pages_onlined) as committed to the host, otherwise it can try
	 * asking us to balloon them out.
	 */
	status.num_avail = si_mem_available();
	status.num_committed = vm_memory_committed() +
		dm->num_pages_ballooned +
		(dm->num_pages_added > dm->num_pages_onlined ?
		 dm->num_pages_added - dm->num_pages_onlined : 0) +
		compute_balloon_floor();

	trace_balloon_status(status.num_avail, status.num_committed,
			     vm_memory_committed(), dm->num_pages_ballooned,
			     dm->num_pages_added, dm->num_pages_onlined);

	/*
	 * If our transaction ID is no longer current, just don't
	 * send the status. This can happen if we were interrupted
	 * after we picked our transaction ID.
	 */
	if (status.hdr.trans_id != atomic_read(&trans_id))
		return;

	/*
	 * If the last post time that we sampled has changed,
	 * we have raced, don't post the status.
	 */
	if (last_post != last_post_time)
		return;

	last_post_time = jiffies;
	vmbus_sendpacket(dm->dev->channel, &status,
			 sizeof(struct dm_status),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);
}
static void free_balloon_pages(struct hv_dynmem_device *dm,
			       union dm_mem_page_range *range_array)
{
	int num_pages = range_array->finfo.page_cnt;
	__u64 start_frame = range_array->finfo.start_page;
	struct page *pg;
	int i;

	for (i = 0; i < num_pages; i++) {
		pg = pfn_to_page(i + start_frame);
		__ClearPageOffline(pg);
		__free_page(pg);
		dm->num_pages_ballooned--;
		adjust_managed_page_count(pg, 1);
	}
}
static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
					unsigned int num_pages,
					struct dm_balloon_response *bl_resp,
					int alloc_unit)
{
	unsigned int i, j;
	struct page *pg;

	for (i = 0; i < num_pages / alloc_unit; i++) {
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
			HV_HYP_PAGE_SIZE)
			return i * alloc_unit;

		/*
		 * We execute this code in a thread context. Furthermore,
		 * we don't want the kernel to try too hard.
		 */
		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
				 __GFP_NOMEMALLOC | __GFP_NOWARN,
				 get_order(alloc_unit << PAGE_SHIFT));

		if (!pg)
			return i * alloc_unit;

		dm->num_pages_ballooned += alloc_unit;

		/*
		 * If we allocated 2M pages; split them so we
		 * can free them in any order we get.
		 */
		if (alloc_unit != 1)
			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

		/* mark all pages offline */
		for (j = 0; j < alloc_unit; j++) {
			__SetPageOffline(pg + j);
			adjust_managed_page_count(pg + j, -1);
		}

		bl_resp->range_count++;
		bl_resp->range_array[i].finfo.start_page =
			page_to_pfn(pg);
		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
		bl_resp->hdr.size += sizeof(union dm_mem_page_range);
	}

	return i * alloc_unit;
}
static void balloon_up(struct work_struct *dummy)
{
	unsigned int num_pages = dm_device.balloon_wrk.num_pages;
	unsigned int num_ballooned = 0;
	struct dm_balloon_response *bl_resp;
	int alloc_unit;
	int ret;
	bool done = false;
	int i;
	long avail_pages;
	unsigned long floor;

	/*
	 * We will attempt 2M allocations. However, if we fail to
	 * allocate 2M chunks, we will go back to PAGE_SIZE allocations.
	 */
	alloc_unit = PAGES_IN_2M;

	avail_pages = si_mem_available();
	floor = compute_balloon_floor();

	/* Refuse to balloon below the floor. */
	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
		pr_info("Balloon request will be partially fulfilled. %s\n",
			avail_pages < num_pages ? "Not enough memory." :
			"Balloon floor reached.");

		num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
	}

	while (!done) {
		memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE);
		bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer;
		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
		bl_resp->more_pages = 1;

		num_pages -= num_ballooned;
		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
						    bl_resp, alloc_unit);

		if (alloc_unit != 1 && num_ballooned == 0) {
			alloc_unit = 1;
			continue;
		}

		if (num_ballooned == 0 || num_ballooned == num_pages) {
			pr_debug("Ballooned %u out of %u requested pages.\n",
				 num_pages, dm_device.balloon_wrk.num_pages);

			bl_resp->more_pages = 0;
			done = true;
			dm_device.state = DM_INITIALIZED;
		}

		/*
		 * We are pushing a lot of data through the channel;
		 * deal with transient failures caused because of the
		 * lack of space in the ring buffer.
		 */
		do {
			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
			ret = vmbus_sendpacket(dm_device.dev->channel,
					       bl_resp,
					       bl_resp->hdr.size,
					       (unsigned long)NULL,
					       VM_PKT_DATA_INBAND, 0);

			if (ret == -EAGAIN)
				msleep(20);
			post_status(&dm_device);
		} while (ret == -EAGAIN);

		if (ret) {
			/*
			 * Free up the memory we allocated.
			 */
			pr_err("Balloon response failed\n");

			for (i = 0; i < bl_resp->range_count; i++)
				free_balloon_pages(&dm_device,
						   &bl_resp->range_array[i]);

			done = true;
		}
	}
}
static void balloon_down(struct hv_dynmem_device *dm,
			 struct dm_unballoon_request *req)
{
	union dm_mem_page_range *range_array = req->range_array;
	int range_count = req->range_count;
	struct dm_unballoon_response resp;
	int i;
	unsigned int prev_pages_ballooned = dm->num_pages_ballooned;

	for (i = 0; i < range_count; i++) {
		free_balloon_pages(dm, &range_array[i]);
		complete(&dm_device.config_event);
	}

	pr_debug("Freed %u ballooned pages.\n",
		 prev_pages_ballooned - dm->num_pages_ballooned);

	if (req->more_pages == 1)
		return;

	memset(&resp, 0, sizeof(struct dm_unballoon_response));
	resp.hdr.type = DM_UNBALLOON_RESPONSE;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	resp.hdr.size = sizeof(struct dm_unballoon_response);

	vmbus_sendpacket(dm_device.dev->channel, &resp,
			 sizeof(struct dm_unballoon_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);

	dm->state = DM_INITIALIZED;
}
static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
	struct hv_dynmem_device *dm = dm_dev;

	while (!kthread_should_stop()) {
		wait_for_completion_interruptible_timeout(
						&dm_device.config_event, 1*HZ);
		/*
		 * The host expects us to post information on the memory
		 * pressure every second.
		 */
		reinit_completion(&dm_device.config_event);
		post_status(dm);
	}

	return 0;
}
static void version_resp(struct hv_dynmem_device *dm,
			 struct dm_version_response *vresp)
{
	struct dm_version_request version_req;
	int ret;

	if (vresp->is_accepted) {
		/*
		 * We are done; wakeup the
		 * context waiting for version
		 * negotiation.
		 */
		complete(&dm->host_event);
		return;
	}
	/*
	 * If there are more versions to try, continue
	 * with negotiations; if not
	 * shutdown the service since we are not able
	 * to negotiate a suitable version number
	 * with the host.
	 */
	if (dm->next_version == 0)
		goto version_error;

	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = dm->next_version;
	dm->version = version_req.version.version;

	/*
	 * Set the next version to try in case current version fails.
	 * Win7 protocol ought to be the last one to try.
	 */
	switch (version_req.version.version) {
	case DYNMEM_PROTOCOL_VERSION_WIN8:
		dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
		version_req.is_last_attempt = 0;
		break;
	default:
		dm->next_version = 0;
		version_req.is_last_attempt = 1;
	}

	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);

	if (ret)
		goto version_error;

	return;

version_error:
	dm->state = DM_INIT_ERROR;
	complete(&dm->host_event);
}
static void cap_resp(struct hv_dynmem_device *dm,
		     struct dm_capabilities_resp_msg *cap_resp)
{
	if (!cap_resp->is_accepted) {
		pr_err("Capabilities not accepted by host\n");
		dm->state = DM_INIT_ERROR;
	}
	complete(&dm->host_event);
}
static void balloon_onchannelcallback(void *context)
{
	struct hv_device *dev = context;
	u32 recvlen;
	u64 requestid;
	struct dm_message *dm_msg;
	struct dm_header *dm_hdr;
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct dm_balloon *bal_msg;
	struct dm_hot_add *ha_msg;
	union dm_mem_page_range *ha_pg_range;
	union dm_mem_page_range *ha_region;

	memset(recv_buffer, 0, sizeof(recv_buffer));
	vmbus_recvpacket(dev->channel, recv_buffer,
			 HV_HYP_PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		dm_msg = (struct dm_message *)recv_buffer;
		dm_hdr = &dm_msg->hdr;

		switch (dm_hdr->type) {
		case DM_VERSION_RESPONSE:
			version_resp(dm,
				     (struct dm_version_response *)dm_msg);
			break;

		case DM_CAPABILITIES_RESPONSE:
			cap_resp(dm,
				 (struct dm_capabilities_resp_msg *)dm_msg);
			break;

		case DM_BALLOON_REQUEST:
			if (allow_hibernation) {
				pr_info("Ignore balloon-up request!\n");
				break;
			}

			if (dm->state == DM_BALLOON_UP)
				pr_warn("Currently ballooning\n");
			bal_msg = (struct dm_balloon *)recv_buffer;
			dm->state = DM_BALLOON_UP;
			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
			schedule_work(&dm_device.balloon_wrk.wrk);
			break;

		case DM_UNBALLOON_REQUEST:
			if (allow_hibernation) {
				pr_info("Ignore balloon-down request!\n");
				break;
			}

			dm->state = DM_BALLOON_DOWN;
			balloon_down(dm,
				     (struct dm_unballoon_request *)recv_buffer);
			break;

		case DM_MEM_HOT_ADD_REQUEST:
			if (dm->state == DM_HOT_ADD)
				pr_warn("Currently hot-adding\n");
			dm->state = DM_HOT_ADD;
			ha_msg = (struct dm_hot_add *)recv_buffer;
			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
				/*
				 * This is a normal hot-add request specifying
				 * hot-add memory.
				 */
				dm->host_specified_ha_region = false;
				ha_pg_range = &ha_msg->range;
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range.page_range = 0;
			} else {
				/*
				 * Host is specifying that we first hot-add
				 * a region and then partially populate this
				 * region.
				 */
				dm->host_specified_ha_region = true;
				ha_pg_range = &ha_msg->range;
				ha_region = &ha_pg_range[1];
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range = *ha_region;
			}
			schedule_work(&dm_device.ha_wrk.wrk);
			break;

		case DM_INFO_MESSAGE:
			process_info(dm, (struct dm_info_msg *)dm_msg);
			break;

		default:
			pr_warn("Unhandled message: type: %d\n", dm_hdr->type);
		}
	}
}
/* Hyper-V only supports reporting 2MB pages or higher */
#define HV_MIN_PAGE_REPORTING_ORDER	9
#define HV_MIN_PAGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << HV_MIN_PAGE_REPORTING_ORDER)
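/*
 * With a 4 KiB HV_HYP_PAGE_SIZE, HV_MIN_PAGE_REPORTING_LEN works out to
 * 4096 << 9 == 2 MiB, matching the 2MB minimum stated above.
 */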
static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
			       struct scatterlist *sgl, unsigned int nents)
{
	unsigned long flags;
	struct hv_memory_hint *hint;
	int i;
	u64 status;
	struct scatterlist *sg;

	WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
	WARN_ON_ONCE(sgl->length < HV_MIN_PAGE_REPORTING_LEN);
	local_irq_save(flags);
	hint = *(struct hv_memory_hint **)this_cpu_ptr(hyperv_pcpu_input_arg);
	if (!hint) {
		local_irq_restore(flags);
		return -ENOSPC;
	}

	hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD;
	hint->reserved = 0;
	for_each_sg(sgl, sg, nents, i) {
		union hv_gpa_page_range *range;

		range = &hint->ranges[i];
		range->address_space = 0;
		/* page reporting only reports 2MB pages or higher */
		range->page.largepage = 1;
		range->page.additional_pages =
			(sg->length / HV_MIN_PAGE_REPORTING_LEN) - 1;
		range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
		range->base_large_pfn =
			page_to_hvpfn(sg_page(sg)) >> HV_MIN_PAGE_REPORTING_ORDER;
	}

	status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
				     hint, NULL);
	local_irq_restore(flags);
	if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) {
		pr_err("Cold memory discard hypercall failed with status %llx\n",
		       status);
		return -EINVAL;
	}

	return 0;
}
static void enable_page_reporting(void)
{
	int ret;

	/* Essentially, validating 'PAGE_REPORTING_MIN_ORDER' is big enough. */
	if (pageblock_order < HV_MIN_PAGE_REPORTING_ORDER) {
		pr_debug("Cold memory discard is only supported on 2MB pages and above\n");
		return;
	}

	if (!hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT)) {
		pr_debug("Cold memory discard hint not supported by Hyper-V\n");
		return;
	}

	BUILD_BUG_ON(PAGE_REPORTING_CAPACITY > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
	dm_device.pr_dev_info.report = hv_free_page_report;
	ret = page_reporting_register(&dm_device.pr_dev_info);
	if (ret < 0) {
		dm_device.pr_dev_info.report = NULL;
		pr_err("Failed to enable cold memory discard: %d\n", ret);
	} else {
		pr_info("Cold memory discard hint enabled\n");
	}
}
static void disable_page_reporting(void)
{
	if (dm_device.pr_dev_info.report) {
		page_reporting_unregister(&dm_device.pr_dev_info);
		dm_device.pr_dev_info.report = NULL;
	}
}
static int balloon_connect_vsp(struct hv_device *dev)
{
	struct dm_version_request version_req;
	struct dm_capabilities cap_msg;
	unsigned long t;
	int ret;

	/*
	 * max_pkt_size should be large enough for one vmbus packet header plus
	 * our receive buffer size. Hyper-V sends messages up to
	 * HV_HYP_PAGE_SIZE bytes long on balloon channel.
	 */
	dev->channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;

	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
			 balloon_onchannelcallback, dev);
	if (ret)
		return ret;

	/*
	 * Initiate the hand shake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
	version_req.is_last_attempt = 0;
	dm_device.version = version_req.version.version;

	ret = vmbus_sendpacket(dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto out;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * If we could not negotiate a compatible version with the host
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -EPROTO;
		goto out;
	}

	pr_info("Using Dynamic Memory protocol version %u.%u\n",
		DYNMEM_MAJOR_VERSION(dm_device.version),
		DYNMEM_MINOR_VERSION(dm_device.version));

	/*
	 * Now submit our capabilities to the host.
	 */
	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
	cap_msg.hdr.size = sizeof(struct dm_capabilities);
	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * When hibernation (i.e. virtual ACPI S4 state) is enabled, the host
	 * currently still requires the bits to be set, so we have to add code
	 * to fail the host's hot-add and balloon up/down requests, if any.
	 */
	cap_msg.caps.cap_bits.balloon = 1;
	cap_msg.caps.cap_bits.hot_add = 1;

	/*
	 * Specify our alignment requirements as it relates to
	 * memory hot-add. Specify 128MB alignment.
	 */
	cap_msg.caps.cap_bits.hot_add_alignment = 7;
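	/*
	 * (hot_add_alignment is an exponent: 2^7 == 128 MB, matching the
	 * HA_CHUNK granularity used for hot-add above.)
	 */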
	/*
	 * Currently the host does not use these
	 * values and we set them to what is done in the
	 * Windows driver.
	 */
	cap_msg.min_page_cnt = 0;
	cap_msg.max_page_number = -1;

	ret = vmbus_sendpacket(dev->channel, &cap_msg,
			       sizeof(struct dm_capabilities),
			       (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto out;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * If the host does not like our capabilities,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -EPROTO;
		goto out;
	}

	return 0;
out:
	vmbus_close(dev->channel);
	return ret;
}
static int balloon_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id)
{
	int ret;

	allow_hibernation = hv_is_hibernation_supported();
	if (allow_hibernation)
		hot_add = false;

#ifdef CONFIG_MEMORY_HOTPLUG
	do_hot_add = hot_add;
#else
	do_hot_add = false;
#endif
	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);
	INIT_LIST_HEAD(&dm_device.ha_region_list);
	spin_lock_init(&dm_device.ha_lock);
	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
	dm_device.host_specified_ha_region = false;

#ifdef CONFIG_MEMORY_HOTPLUG
	set_online_page_callback(&hv_online_page);
	init_completion(&dm_device.ol_waitevent);
	register_memory_notifier(&hv_memory_nb);
#endif

	hv_set_drvdata(dev, &dm_device);

	ret = balloon_connect_vsp(dev);
	if (ret != 0)
		return ret;

	enable_page_reporting();
	dm_device.state = DM_INITIALIZED;

	dm_device.thread =
		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread)) {
		ret = PTR_ERR(dm_device.thread);
		goto probe_error;
	}

	return 0;

probe_error:
	dm_device.state = DM_INIT_ERROR;
	dm_device.thread = NULL;
	disable_page_reporting();
	vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
	unregister_memory_notifier(&hv_memory_nb);
	restore_online_page_callback(&hv_online_page);
#endif
	return ret;
}
static int balloon_remove(struct hv_device *dev)
{
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct hv_hotadd_state *has, *tmp;
	struct hv_hotadd_gap *gap, *tmp_gap;
	unsigned long flags;

	if (dm->num_pages_ballooned != 0)
		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

	cancel_work_sync(&dm->balloon_wrk.wrk);
	cancel_work_sync(&dm->ha_wrk.wrk);

	kthread_stop(dm->thread);
	disable_page_reporting();
	vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
	unregister_memory_notifier(&hv_memory_nb);
	restore_online_page_callback(&hv_online_page);
#endif
	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
			list_del(&gap->list);
			kfree(gap);
		}
		list_del(&has->list);
		kfree(has);
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return 0;
}
static int balloon_suspend(struct hv_device *hv_dev)
{
	struct hv_dynmem_device *dm = hv_get_drvdata(hv_dev);

	tasklet_disable(&hv_dev->channel->callback_event);

	cancel_work_sync(&dm->balloon_wrk.wrk);
	cancel_work_sync(&dm->ha_wrk.wrk);

	if (dm->thread) {
		kthread_stop(dm->thread);
		dm->thread = NULL;
		vmbus_close(hv_dev->channel);
	}

	tasklet_enable(&hv_dev->channel->callback_event);

	return 0;
}
static int balloon_resume(struct hv_device *dev)
{
	int ret;

	dm_device.state = DM_INITIALIZING;

	ret = balloon_connect_vsp(dev);
	if (ret != 0)
		goto out;

	dm_device.thread =
		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread)) {
		ret = PTR_ERR(dm_device.thread);
		dm_device.thread = NULL;
		goto close_channel;
	}

	dm_device.state = DM_INITIALIZED;
	return 0;
close_channel:
	vmbus_close(dev->channel);
out:
	dm_device.state = DM_INIT_ERROR;
#ifdef CONFIG_MEMORY_HOTPLUG
	unregister_memory_notifier(&hv_memory_nb);
	restore_online_page_callback(&hv_online_page);
#endif
	return ret;
}
static const struct hv_vmbus_device_id id_table[] = {
	/* Dynamic Memory Class ID */
	/* 525074DC-8985-46e2-8057-A307DC18A502 */
	{ HV_DM_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);
static struct hv_driver balloon_drv = {
	.name = "hv_balloon",
	.id_table = id_table,
	.probe =  balloon_probe,
	.remove =  balloon_remove,
	.suspend = balloon_suspend,
	.resume = balloon_resume,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

static int __init init_balloon_drv(void)
{
	return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);
MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");