2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/hv/hv_balloon.c
@@ -1,19 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2012, Microsoft Corporation.
  *
  * Author:
  *   K. Y. Srinivasan <kys@microsoft.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -33,6 +23,9 @@
 #include <linux/percpu_counter.h>
 
 #include <linux/hyperv.h>
+#include <asm/hyperv-tlfs.h>
+
+#include <asm/mshyperv.h>
 
 #define CREATE_TRACE_POINTS
 #include "hv_trace_balloon.h"
@@ -351,8 +344,6 @@
  *
  * mem_range: Memory range to hot add.
  *
- * On Linux we currently don't support this since we cannot hot add
- * arbitrary granularity of memory.
  */
 
 struct dm_hot_add {
@@ -467,6 +458,7 @@
         struct work_struct wrk;
 };
 
+static bool allow_hibernation;
 static bool hot_add = true;
 static bool do_hot_add;
 /*
@@ -487,7 +479,7 @@
 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
 static atomic_t trans_id = ATOMIC_INIT(0);
 
-static int dm_ring_size = (5 * PAGE_SIZE);
+static int dm_ring_size = 20 * 1024;
 
 /*
  * Driver specific state.
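The fixed 20 KiB value keeps the VMBus ring size identical regardless of the guest page size, which the old expression did not. A sketch of the arithmetic (the 64 KiB case assumes an ARM64 build):

        /* 20 * 1024     = 20480 bytes, exactly the old 5 * PAGE_SIZE on 4 KiB pages;
         * 5 * PAGE_SIZE = 327680 bytes (320 KiB) on 64 KiB pages, which the
         * fixed constant avoids. */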
@@ -503,10 +495,10 @@
 };
 
 
-static __u8 recv_buffer[PAGE_SIZE];
-static __u8 *send_buffer;
-#define PAGES_IN_2M 512
-#define HA_CHUNK (32 * 1024)
+static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
+static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
+#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
+#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
 
 struct hv_dynmem_device {
         struct hv_device *dev;
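With the page-size-relative definitions above, the constants reduce to the old hardcoded values on 4 KiB-page builds and scale on larger page sizes. A sketch of the arithmetic (the 64 KiB row assumes an ARM64 configuration):

        /* PAGE_SIZE = 4 KiB:  PAGES_IN_2M = 512, HA_CHUNK = 32768 pages
         *                     (identical to the old literals 512 and 32 * 1024)
         * PAGE_SIZE = 64 KiB: PAGES_IN_2M = 32,  HA_CHUNK = 2048 pages
         * Either way, HA_CHUNK still describes a 128 MiB hot-add granule. */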
@@ -541,7 +533,6 @@
          * State to synchronize hot-add.
          */
         struct completion ol_waitevent;
-        bool ha_waiting;
         /*
          * This thread handles hot-add
          * requests from the host as well as notifying
@@ -642,10 +633,7 @@
         switch (val) {
         case MEM_ONLINE:
         case MEM_CANCEL_ONLINE:
-                if (dm_device.ha_waiting) {
-                        dm_device.ha_waiting = false;
-                        complete(&dm_device.ol_waitevent);
-                }
+                complete(&dm_device.ol_waitevent);
                 break;
 
         case MEM_OFFLINE:
@@ -681,15 +669,18 @@
 /* Check if the particular page is backed and can be onlined and online it. */
 static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
 {
-        if (!has_pfn_is_backed(has, page_to_pfn(pg)))
+        if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
+                if (!PageOffline(pg))
+                        __SetPageOffline(pg);
                 return;
+        }
+        if (PageOffline(pg))
+                __ClearPageOffline(pg);
 
         /* This frame is currently backed; online the page. */
-        __online_page_set_limits(pg);
-        __online_page_increment_counters(pg);
-        __online_page_free(pg);
+        generic_online_page(pg, 0);
 
-        WARN_ON_ONCE(!spin_is_locked(&dm_device.ha_lock));
+        lockdep_assert_held(&dm_device.ha_lock);
         dm_device.num_pages_onlined++;
 }
 
@@ -731,12 +722,11 @@
                 has->covered_end_pfn += processed_pfn;
                 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 
-                init_completion(&dm_device.ol_waitevent);
-                dm_device.ha_waiting = !memhp_auto_online;
+                reinit_completion(&dm_device.ol_waitevent);
 
                 nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
                 ret = add_memory(nid, PFN_PHYS((start_pfn)),
-                                (HA_CHUNK << PAGE_SHIFT));
+                                 (HA_CHUNK << PAGE_SHIFT), MEMHP_MERGE_RESOURCE);
 
                 if (ret) {
                         pr_err("hot_add memory failed error is %d\n", ret);
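Together with the probe changes further down (and the wait in the next hunk), the onlining completion now follows a plain arm/wait/complete pattern, which is why the ha_waiting flag can go away. A sketch reconstructed from the hunks in this diff:

        /*
         * balloon_probe():   init_completion(&dm_device.ol_waitevent);
         * per hot-added 128 MiB chunk:
         *         reinit_completion(&dm_device.ol_waitevent);
         *         add_memory(nid, PFN_PHYS(start_pfn),
         *                    HA_CHUNK << PAGE_SHIFT, MEMHP_MERGE_RESOURCE);
         *         wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
         * memory notifier (MEM_ONLINE / MEM_CANCEL_ONLINE):
         *         complete(&dm_device.ol_waitevent);
         */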
@@ -758,20 +748,19 @@
                 }
 
                 /*
-                 * Wait for the memory block to be onlined when memory onlining
-                 * is done outside of kernel (memhp_auto_online). Since the hot
-                 * add has succeeded, it is ok to proceed even if the pages in
-                 * the hot added region have not been "onlined" within the
-                 * allowed time.
+                 * Wait for memory to get onlined. If the kernel onlined the
+                 * memory when adding it, this will return directly. Otherwise,
+                 * it will wait for user space to online the memory. This helps
+                 * to avoid adding memory faster than it is getting onlined. As
+                 * adding succeeded, it is ok to proceed even if the memory was
+                 * not onlined in time.
                  */
-                if (dm_device.ha_waiting)
-                        wait_for_completion_timeout(&dm_device.ol_waitevent,
-                                                    5*HZ);
+                wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
                 post_status(&dm_device);
         }
 }
 
-static void hv_online_page(struct page *pg)
+static void hv_online_page(struct page *pg, unsigned int order)
 {
         struct hv_hotadd_state *has;
         unsigned long flags;
@@ -780,10 +769,11 @@
         spin_lock_irqsave(&dm_device.ha_lock, flags);
         list_for_each_entry(has, &dm_device.ha_region_list, list) {
                 /* The page belongs to a different HAS. */
-                if ((pfn < has->start_pfn) || (pfn >= has->end_pfn))
+                if ((pfn < has->start_pfn) ||
+                    (pfn + (1UL << order) > has->end_pfn))
                         continue;
 
-                hv_page_online_one(has, pg);
+                hv_bring_pgs_online(has, pfn, 1UL << order);
                 break;
         }
         spin_unlock_irqrestore(&dm_device.ha_lock, flags);
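Since hv_online_page() can now be called for a higher-order block, the test must reject any block that is not wholly contained in the hot-add region. A minimal sketch of the predicate being enforced (hypothetical helper, not part of the patch):

        static bool hv_block_within_has(const struct hv_hotadd_state *has,
                                        unsigned long pfn, unsigned int order)
        {
                /* serve the block from this HAS only if all (1UL << order)
                 * pages fall inside [has->start_pfn, has->end_pfn) */
                return pfn >= has->start_pfn &&
                       pfn + (1UL << order) <= has->end_pfn;
        }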
@@ -1057,8 +1047,12 @@
         else
                 resp.result = 0;
 
-        if (!do_hot_add || (resp.page_count == 0))
-                pr_err("Memory hot add failed\n");
+        if (!do_hot_add || resp.page_count == 0) {
+                if (!allow_hibernation)
+                        pr_err("Memory hot add failed\n");
+                else
+                        pr_info("Ignore hot-add request!\n");
+        }
 
         dm->state = DM_INITIALIZED;
         resp.hdr.trans_id = atomic_inc_return(&trans_id);
@@ -1080,7 +1074,7 @@
                 __u64 *max_page_count = (__u64 *)&info_hdr[1];
 
                 pr_info("Max. dynamic memory size: %llu MB\n",
-                        (*max_page_count) >> (20 - PAGE_SHIFT));
+                        (*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
         }
 
         break;
@@ -1092,6 +1086,7 @@
 static unsigned long compute_balloon_floor(void)
 {
         unsigned long min_pages;
+        unsigned long nr_pages = totalram_pages();
 #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
         /* Simple continuous piecewiese linear function:
          *  max MiB -> min MiB gradient
@@ -1104,16 +1099,16 @@
          *  8192       744    (1/16)
          * 32768      1512    (1/32)
          */
-        if (totalram_pages < MB2PAGES(128))
-                min_pages = MB2PAGES(8) + (totalram_pages >> 1);
-        else if (totalram_pages < MB2PAGES(512))
-                min_pages = MB2PAGES(40) + (totalram_pages >> 2);
-        else if (totalram_pages < MB2PAGES(2048))
-                min_pages = MB2PAGES(104) + (totalram_pages >> 3);
-        else if (totalram_pages < MB2PAGES(8192))
-                min_pages = MB2PAGES(232) + (totalram_pages >> 4);
+        if (nr_pages < MB2PAGES(128))
+                min_pages = MB2PAGES(8) + (nr_pages >> 1);
+        else if (nr_pages < MB2PAGES(512))
+                min_pages = MB2PAGES(40) + (nr_pages >> 2);
+        else if (nr_pages < MB2PAGES(2048))
+                min_pages = MB2PAGES(104) + (nr_pages >> 3);
+        else if (nr_pages < MB2PAGES(8192))
+                min_pages = MB2PAGES(232) + (nr_pages >> 4);
         else
-                min_pages = MB2PAGES(488) + (totalram_pages >> 5);
+                min_pages = MB2PAGES(488) + (nr_pages >> 5);
 #undef MB2PAGES
         return min_pages;
 }
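A worked example of the floor computation (assuming PAGE_SHIFT == 12, so MB2PAGES(mb) == mb << 8): a 4096 MiB guest falls in the 2048..8191 MiB bracket, hence

        /* min_pages = MB2PAGES(232) + (MB2PAGES(4096) >> 4)
         *           = MB2PAGES(232) + MB2PAGES(256)
         *           = MB2PAGES(488)         -> a 488 MiB floor,
         * consistent with the 1/16 gradient row of the table above. */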
@@ -1200,6 +1195,7 @@
 
         for (i = 0; i < num_pages; i++) {
                 pg = pfn_to_page(i + start_frame);
+                __ClearPageOffline(pg);
                 __free_page(pg);
                 dm->num_pages_ballooned--;
         }
@@ -1212,12 +1208,12 @@
                                         struct dm_balloon_response *bl_resp,
                                         int alloc_unit)
 {
-        unsigned int i = 0;
+        unsigned int i, j;
         struct page *pg;
 
         for (i = 0; i < num_pages / alloc_unit; i++) {
                 if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
-                        PAGE_SIZE)
+                        HV_HYP_PAGE_SIZE)
                         return i * alloc_unit;
 
                 /*
@@ -1240,6 +1236,10 @@
 
                 if (alloc_unit != 1)
                         split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
+
+                /* mark all pages offline */
+                for (j = 0; j < (1 << get_order(alloc_unit << PAGE_SHIFT)); j++)
+                        __SetPageOffline(pg + j);
 
                 bl_resp->range_count++;
                 bl_resp->range_array[i].finfo.start_page =
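PG_offline now brackets the whole life of a ballooned page: set here as the page leaves the buddy allocator, cleared in free_balloon_pages() (earlier hunk) right before __free_page() returns it. A comment-only sketch of the lifecycle implied by these hunks:

        /*
         * alloc_balloon_pages(): alloc_pages() -> split_page()
         *                        -> __SetPageOffline() on each base page
         *                        (the page is handed to the host; dump tools
         *                         such as makedumpfile can recognize and skip it)
         * free_balloon_pages():  __ClearPageOffline() -> __free_page()
         */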
@@ -1266,9 +1266,9 @@
 
         /*
          * We will attempt 2M allocations. However, if we fail to
-         * allocate 2M chunks, we will go back to 4k allocations.
+         * allocate 2M chunks, we will go back to PAGE_SIZE allocations.
          */
-        alloc_unit = 512;
+        alloc_unit = PAGES_IN_2M;
 
         avail_pages = si_mem_available();
         floor = compute_balloon_floor();
@@ -1283,8 +1283,8 @@
         }
 
         while (!done) {
-                bl_resp = (struct dm_balloon_response *)send_buffer;
-                memset(send_buffer, 0, PAGE_SIZE);
+                memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE);
+                bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer;
                 bl_resp->hdr.type = DM_BALLOON_RESPONSE;
                 bl_resp->hdr.size = sizeof(struct dm_balloon_response);
                 bl_resp->more_pages = 1;
@@ -1482,7 +1482,7 @@
 
         memset(recv_buffer, 0, sizeof(recv_buffer));
         vmbus_recvpacket(dev->channel, recv_buffer,
-                         PAGE_SIZE, &recvlen, &requestid);
+                         HV_HYP_PAGE_SIZE, &recvlen, &requestid);
 
         if (recvlen > 0) {
                 dm_msg = (struct dm_message *)recv_buffer;
@@ -1500,6 +1500,11 @@
                         break;
 
                 case DM_BALLOON_REQUEST:
+                        if (allow_hibernation) {
+                                pr_info("Ignore balloon-up request!\n");
+                                break;
+                        }
+
                         if (dm->state == DM_BALLOON_UP)
                                 pr_warn("Currently ballooning\n");
                         bal_msg = (struct dm_balloon *)recv_buffer;
@@ -1509,6 +1514,11 @@
                         break;
 
                 case DM_UNBALLOON_REQUEST:
+                        if (allow_hibernation) {
+                                pr_info("Ignore balloon-down request!\n");
+                                break;
+                        }
+
                         dm->state = DM_BALLOON_DOWN;
                         balloon_down(dm,
                                      (struct dm_unballoon_request *)recv_buffer);
@@ -1548,65 +1558,25 @@
                         break;
 
                 default:
-                        pr_warn("Unhandled message: type: %d\n", dm_hdr->type);
+                        pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type);
 
                 }
         }
 
 }
 
-static int balloon_probe(struct hv_device *dev,
-                         const struct hv_vmbus_device_id *dev_id)
+static int balloon_connect_vsp(struct hv_device *dev)
 {
-        int ret;
-        unsigned long t;
         struct dm_version_request version_req;
         struct dm_capabilities cap_msg;
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-        do_hot_add = hot_add;
-#else
-        do_hot_add = false;
-#endif
-
-        /*
-         * First allocate a send buffer.
-         */
-
-        send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
-        if (!send_buffer)
-                return -ENOMEM;
+        unsigned long t;
+        int ret;
 
         ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
-                        balloon_onchannelcallback, dev);
-
+                         balloon_onchannelcallback, dev);
         if (ret)
-                goto probe_error0;
+                return ret;
 
-        dm_device.dev = dev;
-        dm_device.state = DM_INITIALIZING;
-        dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
-        init_completion(&dm_device.host_event);
-        init_completion(&dm_device.config_event);
-        INIT_LIST_HEAD(&dm_device.ha_region_list);
-        spin_lock_init(&dm_device.ha_lock);
-        INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
-        INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
-        dm_device.host_specified_ha_region = false;
-
-        dm_device.thread =
-                 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
-        if (IS_ERR(dm_device.thread)) {
-                ret = PTR_ERR(dm_device.thread);
-                goto probe_error1;
-        }
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-        set_online_page_callback(&hv_online_page);
-        register_memory_notifier(&hv_memory_nb);
-#endif
-
-        hv_set_drvdata(dev, &dm_device);
         /*
          * Initiate the hand shake with the host and negotiate
          * a version that the host can support. We start with the
@@ -1622,16 +1592,15 @@
         dm_device.version = version_req.version.version;
 
         ret = vmbus_sendpacket(dev->channel, &version_req,
-                                sizeof(struct dm_version_request),
-                                (unsigned long)NULL,
-                                VM_PKT_DATA_INBAND, 0);
+                               sizeof(struct dm_version_request),
+                               (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
         if (ret)
-                goto probe_error2;
+                goto out;
 
         t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
         if (t == 0) {
                 ret = -ETIMEDOUT;
-                goto probe_error2;
+                goto out;
         }
 
         /*
@@ -1639,8 +1608,8 @@
          * fail the probe function.
          */
         if (dm_device.state == DM_INIT_ERROR) {
-                ret = -ETIMEDOUT;
-                goto probe_error2;
+                ret = -EPROTO;
+                goto out;
         }
 
         pr_info("Using Dynamic Memory protocol version %u.%u\n",
@@ -1655,6 +1624,11 @@
         cap_msg.hdr.size = sizeof(struct dm_capabilities);
         cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
 
+        /*
+         * When hibernation (i.e. virtual ACPI S4 state) is enabled, the host
+         * currently still requires the bits to be set, so we have to add code
+         * to fail the host's hot-add and balloon up/down requests, if any.
+         */
         cap_msg.caps.cap_bits.balloon = 1;
         cap_msg.caps.cap_bits.hot_add = 1;
 
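As the new comment says, the host still insists on both capability bits even when hibernation is possible, so the refusal is deferred to runtime. The guards added elsewhere in this diff combine as follows (a sketch of the resulting behaviour):

        /*
         * With allow_hibernation == true:
         *   balloon_probe():             hot_add = false  ->  do_hot_add == false
         *   hot_add_req():               responds with resp.page_count == 0
         *                                ("Ignore hot-add request!")
         *   balloon_onchannelcallback(): DM_BALLOON_REQUEST   -> ignored
         *                                DM_UNBALLOON_REQUEST -> ignored
         */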
@@ -1673,16 +1647,15 @@
         cap_msg.max_page_number = -1;
 
         ret = vmbus_sendpacket(dev->channel, &cap_msg,
-                                sizeof(struct dm_capabilities),
-                                (unsigned long)NULL,
-                                VM_PKT_DATA_INBAND, 0);
+                               sizeof(struct dm_capabilities),
+                               (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
         if (ret)
-                goto probe_error2;
+                goto out;
 
         t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
         if (t == 0) {
                 ret = -ETIMEDOUT;
-                goto probe_error2;
+                goto out;
         }
 
         /*
@@ -1690,25 +1663,72 @@
          * fail the probe function.
          */
         if (dm_device.state == DM_INIT_ERROR) {
-                ret = -ETIMEDOUT;
-                goto probe_error2;
+                ret = -EPROTO;
+                goto out;
         }
 
+        return 0;
+out:
+        vmbus_close(dev->channel);
+        return ret;
+}
+
+static int balloon_probe(struct hv_device *dev,
+                         const struct hv_vmbus_device_id *dev_id)
+{
+        int ret;
+
+        allow_hibernation = hv_is_hibernation_supported();
+        if (allow_hibernation)
+                hot_add = false;
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+        do_hot_add = hot_add;
+#else
+        do_hot_add = false;
+#endif
+        dm_device.dev = dev;
+        dm_device.state = DM_INITIALIZING;
+        dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
+        init_completion(&dm_device.host_event);
+        init_completion(&dm_device.config_event);
+        INIT_LIST_HEAD(&dm_device.ha_region_list);
+        spin_lock_init(&dm_device.ha_lock);
+        INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
+        INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
+        dm_device.host_specified_ha_region = false;
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+        set_online_page_callback(&hv_online_page);
+        init_completion(&dm_device.ol_waitevent);
+        register_memory_notifier(&hv_memory_nb);
+#endif
+
+        hv_set_drvdata(dev, &dm_device);
+
+        ret = balloon_connect_vsp(dev);
+        if (ret != 0)
+                return ret;
+
         dm_device.state = DM_INITIALIZED;
-        last_post_time = jiffies;
+
+        dm_device.thread =
+                 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
+        if (IS_ERR(dm_device.thread)) {
+                ret = PTR_ERR(dm_device.thread);
+                goto probe_error;
+        }
 
         return 0;
 
-probe_error2:
+probe_error:
+        dm_device.state = DM_INIT_ERROR;
+        dm_device.thread = NULL;
+        vmbus_close(dev->channel);
 #ifdef CONFIG_MEMORY_HOTPLUG
+        unregister_memory_notifier(&hv_memory_nb);
         restore_online_page_callback(&hv_online_page);
 #endif
-        kthread_stop(dm_device.thread);
-
-probe_error1:
-        vmbus_close(dev->channel);
-probe_error0:
-        kfree(send_buffer);
         return ret;
 }
 
@@ -1725,12 +1745,11 @@
         cancel_work_sync(&dm->balloon_wrk.wrk);
         cancel_work_sync(&dm->ha_wrk.wrk);
 
-        vmbus_close(dev->channel);
         kthread_stop(dm->thread);
-        kfree(send_buffer);
+        vmbus_close(dev->channel);
 #ifdef CONFIG_MEMORY_HOTPLUG
-        restore_online_page_callback(&hv_online_page);
         unregister_memory_notifier(&hv_memory_nb);
+        restore_online_page_callback(&hv_online_page);
 #endif
         spin_lock_irqsave(&dm_device.ha_lock, flags);
         list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
@@ -1744,6 +1763,59 @@
         spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 
         return 0;
+}
+
+static int balloon_suspend(struct hv_device *hv_dev)
+{
+        struct hv_dynmem_device *dm = hv_get_drvdata(hv_dev);
+
+        tasklet_disable(&hv_dev->channel->callback_event);
+
+        cancel_work_sync(&dm->balloon_wrk.wrk);
+        cancel_work_sync(&dm->ha_wrk.wrk);
+
+        if (dm->thread) {
+                kthread_stop(dm->thread);
+                dm->thread = NULL;
+                vmbus_close(hv_dev->channel);
+        }
+
+        tasklet_enable(&hv_dev->channel->callback_event);
+
+        return 0;
+
+}
+
+static int balloon_resume(struct hv_device *dev)
+{
+        int ret;
+
+        dm_device.state = DM_INITIALIZING;
+
+        ret = balloon_connect_vsp(dev);
+
+        if (ret != 0)
+                goto out;
+
+        dm_device.thread =
+                 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
+        if (IS_ERR(dm_device.thread)) {
+                ret = PTR_ERR(dm_device.thread);
+                dm_device.thread = NULL;
+                goto close_channel;
+        }
+
+        dm_device.state = DM_INITIALIZED;
+        return 0;
+close_channel:
+        vmbus_close(dev->channel);
+out:
+        dm_device.state = DM_INIT_ERROR;
+#ifdef CONFIG_MEMORY_HOTPLUG
+        unregister_memory_notifier(&hv_memory_nb);
+        restore_online_page_callback(&hv_online_page);
+#endif
+        return ret;
 }
 
 static const struct hv_vmbus_device_id id_table[] = {
@@ -1760,6 +1832,8 @@
         .id_table = id_table,
         .probe = balloon_probe,
         .remove = balloon_remove,
+        .suspend = balloon_suspend,
+        .resume = balloon_resume,
         .driver = {
                 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
         },