/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 *
 * Changed to 0.1 on 2009/03/25
 * Changes to 0.2 on 2009/05/14
 * Changes to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xffff)
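
/*
 * Worked example: DYNMEM_MAKE_VERSION(1, 0) encodes protocol 1.0 as
 * 0x00010000; DYNMEM_MAJOR_VERSION() then recovers 1 from the high word
 * and DYNMEM_MINOR_VERSION() recovers 0 from the low word.
 */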

enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
};

enum dm_message_type {
	/*
	 * Version 0.3
	 */
	DM_ERROR			= 0,
	DM_VERSION_REQUEST		= 1,
	DM_VERSION_RESPONSE		= 2,
	DM_CAPABILITIES_REPORT		= 3,
	DM_CAPABILITIES_RESPONSE	= 4,
	DM_STATUS_REPORT		= 5,
	DM_BALLOON_REQUEST		= 6,
	DM_BALLOON_RESPONSE		= 7,
	DM_UNBALLOON_REQUEST		= 8,
	DM_UNBALLOON_RESPONSE		= 9,
	DM_MEM_HOT_ADD_REQUEST		= 10,
	DM_MEM_HOT_ADD_RESPONSE		= 11,
	DM_VERSION_03_MAX		= 11,
	/*
	 * Version 1.0
	 */
	DM_INFO_MESSAGE			= 12,
	DM_VERSION_1_MAX		= 12
};

/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
	struct {
		__u16 minor_version;
		__u16 major_version;
	};
	__u32 version;
} __packed;

union dm_mem_page_range {
	struct {
		/*
		 * The PFN number of the first page in the range.
		 * 40 bits is the architectural limit of a PFN
		 * number.
		 */
		__u64 start_page:40;
		/*
		 * The number of pages in the range.
		 */
		__u64 page_cnt:24;
	} finfo;
	__u64 page_range;
} __packed;

/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes; including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
	struct dm_header hdr;
	__u8 data[]; /* enclosed message */
} __packed;

/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
	__u32 reservedz:31;
} __packed;

/*
 * Version response message; Host to Guest and indicates
 * if the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * guest should re-try with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

union dm_caps {
	struct {
		__u64 balloon:1;
		__u64 hot_add:1;
		__u64 reservedz:62;
	} cap_bits;
	__u64 caps;
} __packed;

struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies if the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 *		   in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 *	    calculated as File Cache Page Fault Count - Page Read Count.
 *	    This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;

/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;

/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;

/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * arbitrary granularity of memory.
 */

struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times it fails the request.
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation 1: success, 0: failure.
 *
 */

struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 result;
	__u32 page_count;
} __packed;

/*
 * Types of information sent from host to the guest.
 */

enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};

/*
 * Header for the information message.
 */

struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8 info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn : covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
};

struct balloon_state {
	__u32 num_pages;
	struct work_struct wrk;
};

struct hot_add_wrk {
	union dm_mem_page_range ha_page_range;
	union dm_mem_page_range ha_region_range;
	struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");

static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};

static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M	512
#define HA_CHUNK (32 * 1024)
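
/*
 * With 4 KiB pages, HA_CHUNK (32 * 1024 PFNs) corresponds to 128 MiB, the
 * hot-add granularity referenced in the hv_hotadd_state comment above;
 * PAGES_IN_2M is the number of 4 KiB pages in a 2 MiB allocation unit.
 */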

struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;

	/*
	 * State to manage the ballooning (up) operation.
	 */
	struct balloon_state balloon_wrk;

	/*
	 * State to execute the "hot-add" operation.
	 */
	struct hot_add_wrk ha_wrk;

	/*
	 * This state tracks if the host has specified a hot-add
	 * region.
	 */
	bool host_specified_ha_region;

	/*
	 * State to synchronize hot-add.
	 */
	struct completion ol_waitevent;
	bool ha_waiting;

	/*
	 * This thread handles hot-add
	 * requests from the host as well as notifying
	 * the host with regards to memory pressure in
	 * the guest.
	 */
	struct task_struct *thread;

	/*
	 * A list of hot-add regions.
	 */
	struct list_head ha_region_list;

	/*
	 * We start with the highest version we can support
	 * and downgrade based on the host; we save here the
	 * next version to try.
	 */
	__u32 next_version;
};

static struct hv_dynmem_device dm_device;

#ifdef CONFIG_MEMORY_HOTPLUG

static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
{
	int i;
	struct page *pg;

	/* Online each page in the range via the standard callbacks. */
	for (i = 0; i < size; i++) {
		pg = pfn_to_page(start_pfn + i);
		__online_page_set_limits(pg);
		__online_page_increment_counters(pg);
		__online_page_free(pg);
	}
}

static void hv_mem_hot_add(unsigned long start, unsigned long size,
				unsigned long pfn_count,
				struct hv_hotadd_state *has)
{
	int ret = 0;
	int i, nid;
	unsigned long t;
	unsigned long start_pfn;
	unsigned long processed_pfn;
	unsigned long total_pfn = pfn_count;

	for (i = 0; i < (size/HA_CHUNK); i++) {
		start_pfn = start + (i * HA_CHUNK);
		has->ha_end_pfn += HA_CHUNK;

		if (total_pfn > HA_CHUNK) {
			processed_pfn = HA_CHUNK;
			total_pfn -= HA_CHUNK;
		} else {
			processed_pfn = total_pfn;
		}

		has->covered_end_pfn += processed_pfn;

		init_completion(&dm_device.ol_waitevent);
		dm_device.ha_waiting = true;

		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
		ret = add_memory(nid, PFN_PHYS((start_pfn)),
				(HA_CHUNK << PAGE_SHIFT));

		if (ret) {
			pr_info("hot_add memory failed, error is %d\n", ret);
			has->ha_end_pfn -= HA_CHUNK;
			has->covered_end_pfn -= processed_pfn;
			break;
		}

		/*
		 * Wait for the memory block to be onlined.
		 */
		t = wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
		if (t == 0) {
			pr_info("hot_add memory timed out\n");
			has->ha_end_pfn -= HA_CHUNK;
			has->covered_end_pfn -= processed_pfn;
			break;
		}
	}
}
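
/*
 * hv_mem_hot_add() and hv_online_page() below cooperate through
 * ol_waitevent: once the newly added block is brought online (typically by
 * udev/userspace writing to the memory block's "online" attribute),
 * hv_online_page() runs and completes the event so the next HA_CHUNK can
 * be processed.
 */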

static void hv_online_page(struct page *pg)
{
	struct list_head *cur;
	struct hv_hotadd_state *has;
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;

	if (dm_device.ha_waiting) {
		dm_device.ha_waiting = false;
		complete(&dm_device.ol_waitevent);
	}

	list_for_each(cur, &dm_device.ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);
		cur_start_pgp = (unsigned long)
				pfn_to_page(has->covered_start_pfn);
		cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

		if (((unsigned long)pg >= cur_start_pgp) &&
			((unsigned long)pg < cur_end_pgp)) {
			/*
			 * This frame is currently backed; online the
			 * page.
			 */
			__online_page_set_limits(pg);
			__online_page_increment_counters(pg);
			__online_page_free(pg);
			has->covered_start_pfn++;
		}
	}
}

/*
 * Check if the given pfn range is covered by (or can be folded into) one
 * of the hot-add regions we are already tracking.
 */
static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
	struct list_head *cur;
	struct hv_hotadd_state *has;
	unsigned long residual, new_inc;

	if (list_empty(&dm_device.ha_region_list))
		return false;

	list_for_each(cur, &dm_device.ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if ((start_pfn >= has->end_pfn))
			continue;
		/*
		 * If the current hot add-request extends beyond
		 * our current limit; extend it.
		 */
		if ((start_pfn + pfn_cnt) > has->end_pfn) {
			residual = (start_pfn + pfn_cnt - has->end_pfn);
			/*
			 * Extend the region by multiples of HA_CHUNK.
			 */
			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
			if (residual % HA_CHUNK)
				new_inc += HA_CHUNK;

			has->end_pfn += new_inc;
		}
		/*
		 * If the current start pfn is not where the covered_end
		 * is, update it.
		 */
		if (has->covered_end_pfn != start_pfn) {
			has->covered_end_pfn = start_pfn;
			has->covered_start_pfn = start_pfn;
		}
		return true;
	}

	return false;
}
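
/*
 * Worked example of the rounding above: a residual of 40000 PFNs yields
 * new_inc = (40000 / 32768) * 32768 = 32768, and since 40000 % 32768 != 0
 * the region grows by one more HA_CHUNK, i.e. 65536 PFNs in total.
 */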

static unsigned long handle_pg_range(unsigned long pg_start,
					unsigned long pg_count)
{
	unsigned long start_pfn = pg_start;
	unsigned long pfn_cnt = pg_count;
	unsigned long size;
	struct list_head *cur;
	struct hv_hotadd_state *has;
	unsigned long pgs_ol = 0;
	unsigned long old_covered_state;

	if (list_empty(&dm_device.ha_region_list))
		return 0;

	list_for_each(cur, &dm_device.ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if ((start_pfn >= has->end_pfn))
			continue;

		old_covered_state = has->covered_end_pfn;

		if (start_pfn < has->ha_end_pfn) {
			/*
			 * This is the case where we are backing pages
			 * in an already hot added region. Bring
			 * these pages online first.
			 */
			pgs_ol = has->ha_end_pfn - start_pfn;
			if (pgs_ol > pfn_cnt)
				pgs_ol = pfn_cnt;
			hv_bring_pgs_online(start_pfn, pgs_ol);
			has->covered_end_pfn += pgs_ol;
			has->covered_start_pfn += pgs_ol;
			pfn_cnt -= pgs_ol;
		}

		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
			/*
			 * We have some residual hot add range
			 * that needs to be hot added; hot add
			 * it now. Hot add a multiple of
			 * HA_CHUNK that fully covers the pages
			 * we have.
			 */
			size = (has->end_pfn - has->ha_end_pfn);
			if (pfn_cnt <= size) {
				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
				if (pfn_cnt % HA_CHUNK)
					size += HA_CHUNK;
			} else {
				pfn_cnt = size;
			}
			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
		}
		/*
		 * If we managed to online any pages that were given to us,
		 * we declare success.
		 */
		return has->covered_end_pfn - old_covered_state;
	}

	return 0;
}

static unsigned long process_hot_add(unsigned long pg_start,
					unsigned long pfn_cnt,
					unsigned long rg_start,
					unsigned long rg_size)
{
	struct hv_hotadd_state *ha_region = NULL;

	if (!dm_device.host_specified_ha_region)
		if (pfn_covered(pg_start, pfn_cnt))
			goto do_pg_range;

	/*
	 * If the host has specified a hot-add range; deal with it first.
	 */

	if ((rg_size != 0) && (!dm_device.host_specified_ha_region)) {
		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
		if (!ha_region)
			return 0;

		INIT_LIST_HEAD(&ha_region->list);

		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
		ha_region->start_pfn = rg_start;
		ha_region->ha_end_pfn = rg_start;
		ha_region->covered_start_pfn = pg_start;
		ha_region->covered_end_pfn = pg_start;
		ha_region->end_pfn = rg_start + rg_size;
	}

do_pg_range:
	/*
	 * Process the page range specified; bringing them
	 * online if possible.
	 */
	return handle_pg_range(pg_start, pfn_cnt);
}

#endif

static void hot_add_req(struct work_struct *dummy)
{
	struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
	unsigned long pg_start, pfn_cnt;
	unsigned long rg_start, rg_sz;
#endif
	struct hv_dynmem_device *dm = &dm_device;

	memset(&resp, 0, sizeof(struct dm_hot_add_response));
	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
	resp.hdr.size = sizeof(struct dm_hot_add_response);
	resp.hdr.trans_id = atomic_inc_return(&trans_id);

#ifdef CONFIG_MEMORY_HOTPLUG
	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
		unsigned long region_size;
		unsigned long region_start;

		/*
		 * The host has not specified the hot-add region.
		 * Based on the hot-add page range being specified,
		 * compute a hot-add region that can cover the pages
		 * that need to be hot-added while ensuring the alignment
		 * and size requirements of Linux as it relates to hot-add.
		 */
		region_start = pg_start;
		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
		if (pfn_cnt % HA_CHUNK)
			region_size += HA_CHUNK;

		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

		rg_start = region_start;
		rg_sz = region_size;
	}

	resp.page_count = process_hot_add(pg_start, pfn_cnt,
					rg_start, rg_sz);
#endif
	if (resp.page_count > 0)
		resp.result = 1;

	if (!do_hot_add || (resp.page_count == 0))
		pr_info("Memory hot add failed\n");

	dm->state = DM_INITIALIZED;
	vmbus_sendpacket(dm->dev->channel, &resp,
			sizeof(struct dm_hot_add_response),
			(unsigned long)NULL,
			VM_PKT_DATA_INBAND, 0);
}
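
/*
 * Worked example for the region computation above: for pg_start = 74565
 * and pfn_cnt = 1000, region_start rounds down to
 * (74565 / 32768) * 32768 = 65536 and region_size rounds up to one
 * HA_CHUNK (32768 PFNs), so the computed region fully covers the
 * requested pages.
 */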

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
	struct dm_info_header *info_hdr;

	info_hdr = (struct dm_info_header *)msg->info;

	switch (info_hdr->type) {
	case INFO_TYPE_MAX_PAGE_CNT:
		pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
		pr_info("Data Size is %d\n", info_hdr->data_size);
		break;
	default:
		pr_info("Received Unknown type: %d\n", info_hdr->type);
	}
}

static unsigned long compute_balloon_floor(void)
{
	unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/* Simple continuous piecewise linear function:
	 *  max MiB -> min MiB  gradient
	 */
	if (totalram_pages < MB2PAGES(128))
		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
	else if (totalram_pages < MB2PAGES(512))
		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
	else if (totalram_pages < MB2PAGES(2048))
		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
	else
		min_pages = MB2PAGES(296) + (totalram_pages >> 5);
#undef MB2PAGES
	return min_pages;
}

/*
 * Post our status, as it relates to memory pressure, to the
 * host. Host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */

static void post_status(struct hv_dynmem_device *dm)
{
	struct dm_status status;
	struct sysinfo val;

	if (pressure_report_delay > 0) {
		--pressure_report_delay;
		return;
	}
	si_meminfo(&val);
	memset(&status, 0, sizeof(struct dm_status));
	status.hdr.type = DM_STATUS_REPORT;
	status.hdr.size = sizeof(struct dm_status);
	status.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * The host expects the guest to report free memory.
	 * Further, the host expects the pressure information to
	 * include the ballooned out pages.
	 * For a given amount of memory that we are managing, we
	 * need to compute a floor below which we should not balloon.
	 * Compute this and add it to the pressure report.
	 */
	status.num_avail = val.freeram;
	status.num_committed = vm_memory_committed() +
				dm->num_pages_ballooned +
				compute_balloon_floor();

	vmbus_sendpacket(dm->dev->channel, &status,
				sizeof(struct dm_status),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);
}

static void free_balloon_pages(struct hv_dynmem_device *dm,
			union dm_mem_page_range *range_array)
{
	int num_pages = range_array->finfo.page_cnt;
	__u64 start_frame = range_array->finfo.start_page;
	struct page *pg;
	int i;

	for (i = 0; i < num_pages; i++) {
		pg = pfn_to_page(i + start_frame);
		__free_page(pg);
		dm->num_pages_ballooned--;
	}
}

static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
			struct dm_balloon_response *bl_resp, int alloc_unit,
			bool *alloc_error)
{
	int i = 0;
	struct page *pg;

	if (num_pages < alloc_unit)
		return 0;

	for (i = 0; (i * alloc_unit) < num_pages; i++) {
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
			PAGE_SIZE)
			return i * alloc_unit;

		/*
		 * We execute this code in a thread context. Furthermore,
		 * we don't want the kernel to try too hard.
		 */
		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
				__GFP_NOMEMALLOC | __GFP_NOWARN,
				get_order(alloc_unit << PAGE_SHIFT));

		if (!pg) {
			*alloc_error = true;
			return i * alloc_unit;
		}

		dm->num_pages_ballooned += alloc_unit;

		bl_resp->range_count++;
		bl_resp->range_array[i].finfo.start_page =
			page_to_pfn(pg);
		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
		bl_resp->hdr.size += sizeof(union dm_mem_page_range);

	}

	return num_pages;
}
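
/*
 * alloc_unit is expressed in pages: with alloc_unit == 1 each
 * alloc_pages() call above is an order-0 (4 KiB) allocation, while an
 * alloc_unit of PAGES_IN_2M (512) would request order-9 (2 MiB) blocks.
 */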

static void balloon_up(struct work_struct *dummy)
{
	int num_pages = dm_device.balloon_wrk.num_pages;
	int num_ballooned = 0;
	struct dm_balloon_response *bl_resp;
	int alloc_unit;
	int ret;
	bool alloc_error = false;
	bool done = false;
	int i;

	/*
	 * Currently, we only support 4k allocations.
	 */
	alloc_unit = 1;

	while (!done) {
		bl_resp = (struct dm_balloon_response *)send_buffer;
		memset(send_buffer, 0, PAGE_SIZE);
		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
		bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
		bl_resp->more_pages = 1;

		num_pages -= num_ballooned;
		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
						bl_resp, alloc_unit,
						&alloc_error);

		if ((alloc_error) || (num_ballooned == num_pages)) {
			bl_resp->more_pages = 0;
			done = true;
			dm_device.state = DM_INITIALIZED;
		}

		/*
		 * We are pushing a lot of data through the channel;
		 * deal with transient failures caused because of the
		 * lack of space in the ring buffer.
		 */
		do {
			ret = vmbus_sendpacket(dm_device.dev->channel,
						bl_resp,
						bl_resp->hdr.size,
						(unsigned long)NULL,
						VM_PKT_DATA_INBAND, 0);

			if (ret == -EAGAIN)
				msleep(20);

		} while (ret == -EAGAIN);

		if (ret) {
			/*
			 * Free up the memory we allocated.
			 */
			pr_info("Balloon response failed\n");

			for (i = 0; i < bl_resp->range_count; i++)
				free_balloon_pages(&dm_device,
						&bl_resp->range_array[i]);

			done = true;
		}
	}
}

static void balloon_down(struct hv_dynmem_device *dm,
			struct dm_unballoon_request *req)
{
	union dm_mem_page_range *range_array = req->range_array;
	int range_count = req->range_count;
	struct dm_unballoon_response resp;
	int i;

	for (i = 0; i < range_count; i++)
		free_balloon_pages(dm, &range_array[i]);

	if (req->more_pages == 1)
		return;

	memset(&resp, 0, sizeof(struct dm_unballoon_response));
	resp.hdr.type = DM_UNBALLOON_RESPONSE;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	resp.hdr.size = sizeof(struct dm_unballoon_response);

	vmbus_sendpacket(dm_device.dev->channel, &resp,
				sizeof(struct dm_unballoon_response),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);

	dm->state = DM_INITIALIZED;
}
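
/*
 * An unballoon transaction can span several messages; as long as
 * req->more_pages is set we keep freeing ranges, and only the final
 * message of the transaction is acknowledged with a response.
 */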

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
	struct hv_dynmem_device *dm = dm_dev;
	unsigned long t;

	while (!kthread_should_stop()) {
		t = wait_for_completion_timeout(&dm_device.config_event, 1*HZ);
		/*
		 * The host expects us to post information on the memory
		 * pressure every second.
		 */
		if (t == 0)
			post_status(dm);
	}

	return 0;
}

static void version_resp(struct hv_dynmem_device *dm,
			struct dm_version_response *vresp)
{
	struct dm_version_request version_req;
	int ret;

	if (vresp->is_accepted) {
		/*
		 * We are done; wakeup the
		 * context waiting for version
		 * negotiation.
		 */
		complete(&dm->host_event);
		return;
	}
	/*
	 * If there are more versions to try, continue
	 * with negotiations; if not
	 * shutdown the service since we are not able
	 * to negotiate a suitable version number
	 * with the host.
	 */
	if (dm->next_version == 0)
		goto version_error;

	dm->next_version = 0;
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
	version_req.is_last_attempt = 1;

	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
				sizeof(struct dm_version_request),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);

	if (ret)
		goto version_error;

	return;

version_error:
	dm->state = DM_INIT_ERROR;
	complete(&dm->host_event);
}

static void cap_resp(struct hv_dynmem_device *dm,
			struct dm_capabilities_resp_msg *cap_resp)
{
	if (!cap_resp->is_accepted) {
		pr_info("Capabilities not accepted by host\n");
		dm->state = DM_INIT_ERROR;
	}
	complete(&dm->host_event);
}

static void balloon_onchannelcallback(void *context)
{
	struct hv_device *dev = context;
	u32 recvlen;
	u64 requestid;
	struct dm_message *dm_msg;
	struct dm_header *dm_hdr;
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct dm_balloon *bal_msg;
	struct dm_hot_add *ha_msg;
	union dm_mem_page_range *ha_pg_range;
	union dm_mem_page_range *ha_region;

	memset(recv_buffer, 0, sizeof(recv_buffer));
	vmbus_recvpacket(dev->channel, recv_buffer,
			PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		dm_msg = (struct dm_message *)recv_buffer;
		dm_hdr = &dm_msg->hdr;

		switch (dm_hdr->type) {
		case DM_VERSION_RESPONSE:
			version_resp(dm,
				(struct dm_version_response *)dm_msg);
			break;

		case DM_CAPABILITIES_RESPONSE:
			cap_resp(dm,
				(struct dm_capabilities_resp_msg *)dm_msg);
			break;

		case DM_BALLOON_REQUEST:
			if (dm->state == DM_BALLOON_UP)
				pr_warn("Currently ballooning\n");
			bal_msg = (struct dm_balloon *)recv_buffer;
			dm->state = DM_BALLOON_UP;
			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
			schedule_work(&dm_device.balloon_wrk.wrk);
			break;

		case DM_UNBALLOON_REQUEST:
			dm->state = DM_BALLOON_DOWN;
			balloon_down(dm,
				(struct dm_unballoon_request *)recv_buffer);
			break;

		case DM_MEM_HOT_ADD_REQUEST:
			if (dm->state == DM_HOT_ADD)
				pr_warn("Currently hot-adding\n");
			dm->state = DM_HOT_ADD;
			ha_msg = (struct dm_hot_add *)recv_buffer;
			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
				/*
				 * This is a normal hot-add request specifying
				 * hot-add memory.
				 */
				ha_pg_range = &ha_msg->range;
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range.page_range = 0;
			} else {
				/*
				 * Host is specifying that we first hot-add
				 * a region and then partially populate this
				 * region.
				 */
				dm->host_specified_ha_region = true;
				ha_pg_range = &ha_msg->range;
				ha_region = &ha_pg_range[1];
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range = *ha_region;
			}
			schedule_work(&dm_device.ha_wrk.wrk);
			break;

		case DM_INFO_MESSAGE:
			process_info(dm, (struct dm_info_msg *)dm_msg);
			break;

		default:
			pr_err("Unhandled message: type: %d\n", dm_hdr->type);

		}
	}
}
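
/*
 * Design note: the channel callback only records the incoming request and
 * defers the actual balloon/hot-add work to the balloon_wrk/ha_wrk work
 * items; the response to the host is sent from those work items once the
 * (potentially slow) operation finishes.
 */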

static int balloon_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	int ret;
	unsigned long t;
	struct dm_version_request version_req;
	struct dm_capabilities cap_msg;

	do_hot_add = hot_add;

	/*
	 * First allocate a send buffer.
	 */

	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!send_buffer)
		return -ENOMEM;

	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
			balloon_onchannelcallback, dev);

	if (ret)
		goto probe_error0;

	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);
	INIT_LIST_HEAD(&dm_device.ha_region_list);
	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
	dm_device.host_specified_ha_region = false;

	dm_device.thread =
		kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread)) {
		ret = PTR_ERR(dm_device.thread);
		goto probe_error1;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	set_online_page_callback(&hv_online_page);
#endif

	hv_set_drvdata(dev, &dm_device);
	/*
	 * Initiate the hand shake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
	version_req.is_last_attempt = 0;

	ret = vmbus_sendpacket(dev->channel, &version_req,
				sizeof(struct dm_version_request),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If we could not negotiate a compatible version with the host
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}
	/*
	 * Now submit our capabilities to the host.
	 */
	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
	cap_msg.hdr.size = sizeof(struct dm_capabilities);
	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

	cap_msg.caps.cap_bits.balloon = 1;
	cap_msg.caps.cap_bits.hot_add = 1;

	/*
	 * Currently the host does not use these
	 * values and we set them to what is done in the
	 * Windows driver.
	 */
	cap_msg.min_page_cnt = 0;
	cap_msg.max_page_number = -1;

	ret = vmbus_sendpacket(dev->channel, &cap_msg,
				sizeof(struct dm_capabilities),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If the host does not like our capabilities,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	dm_device.state = DM_INITIALIZED;

	return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
#endif
	kthread_stop(dm_device.thread);

probe_error1:
	vmbus_close(dev->channel);
probe_error0:
	kfree(send_buffer);
	return ret;
}
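
/*
 * Probe sequence summary: open the channel, start the status thread,
 * negotiate the protocol version (WIN8 first, falling back to WIN7 in
 * version_resp()), then report the balloon and hot-add capabilities
 * before declaring the device DM_INITIALIZED.
 */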

static int balloon_remove(struct hv_device *dev)
{
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct list_head *cur, *tmp;
	struct hv_hotadd_state *has;

	if (dm->num_pages_ballooned != 0)
		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

	cancel_work_sync(&dm->balloon_wrk.wrk);
	cancel_work_sync(&dm->ha_wrk.wrk);

	vmbus_close(dev->channel);
	kthread_stop(dm->thread);
	kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
#endif
	list_for_each_safe(cur, tmp, &dm->ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);
		list_del(&has->list);
		kfree(has);
	}

	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Dynamic Memory Class ID */
	/* 525074DC-8985-46e2-8057-A307DC18A502 */
	{ VMBUS_DEVICE(0xdc, 0x74, 0x50, 0x52, 0x85, 0x89, 0xe2, 0x46,
		       0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
	.name = "hv_balloon",
	.id_table = id_table,
	.probe = balloon_probe,
	.remove = balloon_remove,
};

static int __init init_balloon_drv(void)
{
	return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_LICENSE("GPL");