Diffstat (limited to 'drivers/hv')
-rw-r--r--   drivers/hv/channel.c    |  16
-rw-r--r--   drivers/hv/connection.c |   8
-rw-r--r--   drivers/hv/hv.c         |   5
-rw-r--r--   drivers/hv/hv_balloon.c | 136
4 files changed, 114 insertions, 51 deletions
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 1ef37c727572..d037454fe7b8 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -73,7 +73,6 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	void *in, *out;
 	unsigned long flags;
 	int ret, err = 0;
-	unsigned long t;
 	struct page *page;
 
 	spin_lock_irqsave(&newchannel->lock, flags);
@@ -183,11 +182,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 		goto error1;
 	}
 
-	t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
-	if (t == 0) {
-		err = -ETIMEDOUT;
-		goto error1;
-	}
+	wait_for_completion(&open_info->waitevent);
 
 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 	list_del(&open_info->msglistentry);
@@ -375,7 +370,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
 	struct vmbus_channel_gpadl_header *gpadlmsg;
 	struct vmbus_channel_gpadl_body *gpadl_body;
 	struct vmbus_channel_msginfo *msginfo = NULL;
-	struct vmbus_channel_msginfo *submsginfo;
+	struct vmbus_channel_msginfo *submsginfo, *tmp;
 	u32 msgcount;
 	struct list_head *curr;
 	u32 next_gpadl_handle;
@@ -437,6 +432,13 @@ cleanup:
 	list_del(&msginfo->msglistentry);
 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 
+	if (msgcount > 1) {
+		list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
+			 msglistentry) {
+			kfree(submsginfo);
+		}
+	}
+
 	kfree(msginfo);
 	return ret;
 }
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 4fc2e8836e60..2bbc53025549 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -429,7 +429,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
 	union hv_connection_id conn_id;
 	int ret = 0;
 	int retries = 0;
-	u32 msec = 1;
+	u32 usec = 1;
 
 	conn_id.asu32 = 0;
 	conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;
@@ -462,9 +462,9 @@ int vmbus_post_msg(void *buffer, size_t buflen)
 		}
 
 		retries++;
-		msleep(msec);
-		if (msec < 2048)
-			msec *= 2;
+		udelay(usec);
+		if (usec < 2048)
+			usec *= 2;
 	}
 	return ret;
 }
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index ddbf7e7e0d98..8ce1f2e22912 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -305,9 +305,10 @@ void hv_cleanup(bool crash)
 
 		hypercall_msr.as_uint64 = 0;
 		wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
-		if (!crash)
+		if (!crash) {
 			vfree(hv_context.tsc_page);
-		hv_context.tsc_page = NULL;
+			hv_context.tsc_page = NULL;
+		}
 	}
 #endif
 }
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 43af91362be5..354da7f207b7 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -430,16 +430,27 @@ struct dm_info_msg {
  * currently hot added. We hot add in multiples of 128M
  * chunks; it is possible that we may not be able to bring
  * online all the pages in the region. The range
- * covered_end_pfn defines the pages that can
+ * covered_start_pfn:covered_end_pfn defines the pages that can
  * be brough online.
  */
 
 struct hv_hotadd_state {
 	struct list_head list;
 	unsigned long start_pfn;
+	unsigned long covered_start_pfn;
 	unsigned long covered_end_pfn;
 	unsigned long ha_end_pfn;
 	unsigned long end_pfn;
+	/*
+	 * A list of gaps.
+	 */
+	struct list_head gap_list;
+};
+
+struct hv_hotadd_gap {
+	struct list_head list;
+	unsigned long start_pfn;
+	unsigned long end_pfn;
 };
 
 struct balloon_state {
@@ -595,18 +606,46 @@ static struct notifier_block hv_memory_nb = {
 	.priority = 0
 };
 
+/* Check if the particular page is backed and can be onlined and online it. */
+static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
+{
+	unsigned long cur_start_pgp;
+	unsigned long cur_end_pgp;
+	struct hv_hotadd_gap *gap;
+
+	cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
+	cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
+
+	/* The page is not backed. */
+	if (((unsigned long)pg < cur_start_pgp) ||
+	    ((unsigned long)pg >= cur_end_pgp))
+		return;
+
+	/* Check for gaps. */
+	list_for_each_entry(gap, &has->gap_list, list) {
+		cur_start_pgp = (unsigned long)
+			pfn_to_page(gap->start_pfn);
+		cur_end_pgp = (unsigned long)
+			pfn_to_page(gap->end_pfn);
+		if (((unsigned long)pg >= cur_start_pgp) &&
+		    ((unsigned long)pg < cur_end_pgp)) {
+			return;
+		}
+	}
 
-static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
+	/* This frame is currently backed; online the page. */
+	__online_page_set_limits(pg);
+	__online_page_increment_counters(pg);
+	__online_page_free(pg);
+}
+
+static void hv_bring_pgs_online(struct hv_hotadd_state *has,
+				unsigned long start_pfn, unsigned long size)
 {
 	int i;
 
-	for (i = 0; i < size; i++) {
-		struct page *pg;
-		pg = pfn_to_page(start_pfn + i);
-		__online_page_set_limits(pg);
-		__online_page_increment_counters(pg);
-		__online_page_free(pg);
-	}
+	for (i = 0; i < size; i++)
+		hv_page_online_one(has, pfn_to_page(start_pfn + i));
 }
 
 static void hv_mem_hot_add(unsigned long start, unsigned long size,
@@ -682,26 +721,25 @@ static void hv_online_page(struct page *pg)
 
 	list_for_each(cur, &dm_device.ha_region_list) {
 		has = list_entry(cur, struct hv_hotadd_state, list);
-		cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn);
-		cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
+		cur_start_pgp = (unsigned long)
+			pfn_to_page(has->start_pfn);
+		cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);
 
-		if (((unsigned long)pg >= cur_start_pgp) &&
-		    ((unsigned long)pg < cur_end_pgp)) {
-			/*
-			 * This frame is currently backed; online the
-			 * page.
-			 */
-			__online_page_set_limits(pg);
-			__online_page_increment_counters(pg);
-			__online_page_free(pg);
-		}
+		/* The page belongs to a different HAS. */
+		if (((unsigned long)pg < cur_start_pgp) ||
+		    ((unsigned long)pg >= cur_end_pgp))
+			continue;
+
+		hv_page_online_one(has, pg);
+		break;
 	}
 }
 
-static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
+static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 {
 	struct list_head *cur;
 	struct hv_hotadd_state *has;
+	struct hv_hotadd_gap *gap;
 	unsigned long residual, new_inc;
 
 	if (list_empty(&dm_device.ha_region_list))
@@ -716,6 +754,24 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 		 */
 		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
 			continue;
+
+		/*
+		 * If the current start pfn is not where the covered_end
+		 * is, create a gap and update covered_end_pfn.
+		 */
+		if (has->covered_end_pfn != start_pfn) {
+			gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
+			if (!gap)
+				return -ENOMEM;
+
+			INIT_LIST_HEAD(&gap->list);
+			gap->start_pfn = has->covered_end_pfn;
+			gap->end_pfn = start_pfn;
+			list_add_tail(&gap->list, &has->gap_list);
+
+			has->covered_end_pfn = start_pfn;
+		}
+
 		/*
 		 * If the current hot add-request extends beyond
 		 * our current limit; extend it.
@@ -732,19 +788,10 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 			has->end_pfn += new_inc;
 		}
 
-		/*
-		 * If the current start pfn is not where the covered_end
-		 * is, update it.
-		 */
-
-		if (has->covered_end_pfn != start_pfn)
-			has->covered_end_pfn = start_pfn;
-
-		return true;
-
+		return 1;
 	}
 
-	return false;
+	return 0;
 }
 
 static unsigned long handle_pg_range(unsigned long pg_start,
@@ -783,6 +830,8 @@ static unsigned long handle_pg_range(unsigned long pg_start,
 			if (pgs_ol > pfn_cnt)
 				pgs_ol = pfn_cnt;
 
+			has->covered_end_pfn += pgs_ol;
+			pfn_cnt -= pgs_ol;
 			/*
 			 * Check if the corresponding memory block is already
 			 * online by checking its last previously backed page.
@@ -791,10 +840,8 @@
 			 */
 			if (start_pfn > has->start_pfn &&
 			    !PageReserved(pfn_to_page(start_pfn - 1)))
-				hv_bring_pgs_online(start_pfn, pgs_ol);
+				hv_bring_pgs_online(has, start_pfn, pgs_ol);
 
-			has->covered_end_pfn += pgs_ol;
-			pfn_cnt -= pgs_ol;
 		}
 
 		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
@@ -832,13 +879,19 @@ static unsigned long process_hot_add(unsigned long pg_start,
 			unsigned long rg_size)
 {
 	struct hv_hotadd_state *ha_region = NULL;
+	int covered;
 
 	if (pfn_cnt == 0)
 		return 0;
 
-	if (!dm_device.host_specified_ha_region)
-		if (pfn_covered(pg_start, pfn_cnt))
+	if (!dm_device.host_specified_ha_region) {
+		covered = pfn_covered(pg_start, pfn_cnt);
+		if (covered < 0)
+			return 0;
+
+		if (covered)
 			goto do_pg_range;
+	}
 
 	/*
 	 * If the host has specified a hot-add range; deal with it first.
@@ -850,10 +903,12 @@ static unsigned long process_hot_add(unsigned long pg_start,
 			return 0;
 
 		INIT_LIST_HEAD(&ha_region->list);
+		INIT_LIST_HEAD(&ha_region->gap_list);
 
 		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
 		ha_region->start_pfn = rg_start;
 		ha_region->ha_end_pfn = rg_start;
+		ha_region->covered_start_pfn = pg_start;
 		ha_region->covered_end_pfn = pg_start;
 		ha_region->end_pfn = rg_start + rg_size;
 	}
@@ -1581,6 +1636,7 @@ static int balloon_remove(struct hv_device *dev)
 	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
 	struct list_head *cur, *tmp;
 	struct hv_hotadd_state *has;
+	struct hv_hotadd_gap *gap, *tmp_gap;
 
 	if (dm->num_pages_ballooned != 0)
 		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
@@ -1597,6 +1653,10 @@ static int balloon_remove(struct hv_device *dev)
 #endif
 	list_for_each_safe(cur, tmp, &dm->ha_region_list) {
 		has = list_entry(cur, struct hv_hotadd_state, list);
+		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
+			list_del(&gap->list);
+			kfree(gap);
+		}
 		list_del(&has->list);
 		kfree(has);
 	}
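
A note on the cleanup hunks: both the gpadl error path in channel.c and the gap teardown in balloon_remove() free entries while walking a list, which is what list_for_each_entry_safe() exists for — it reads the successor into a second cursor before the loop body runs, so freeing the current entry cannot break the traversal. A minimal userspace sketch of the same pattern (a plain singly linked list, not the kernel macro):

/*
 * Why a "safe" iterator is needed when freeing during traversal:
 * the successor must be saved before the current node is freed.
 * Plain C analogue of list_for_each_entry_safe(); an illustration
 * only, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

static void free_all(struct node *head)
{
        struct node *cur = head;

        while (cur) {
                struct node *tmp = cur->next; /* save successor first */
                printf("freeing %d\n", cur->val);
                free(cur);                    /* cur is now dangling... */
                cur = tmp;                    /* ...but tmp is still valid */
        }
}

int main(void)
{
        struct node *head = NULL;
        int i;

        /* Build a three-element list: 2 -> 1 -> 0. */
        for (i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->val = i;
                n->next = head;
                head = n;
        }
        free_all(head);
        return 0;
}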
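The connection.c hunk changes the retry cadence in vmbus_post_msg() from a millisecond sleep to a microsecond busy-wait, keeping the same shape: bounded exponential backoff that doubles from 1 up to a 2048-unit ceiling. A rough userspace sketch of that loop, with usleep() standing in for udelay() and a hypothetical posting_succeeded() in place of the real hypercall (the retry cap here is arbitrary, chosen only for the sketch):

#include <stdbool.h>
#include <unistd.h>

/* Hypothetical stand-in for the hypercall; pretend try #6 works. */
static bool posting_succeeded(int attempt)
{
        return attempt >= 5;
}

static int post_with_backoff(void)
{
        unsigned int usec = 1;
        int retries = 0;

        while (retries < 20) {          /* arbitrary cap for the sketch */
                if (posting_succeeded(retries))
                        return 0;
                retries++;
                usleep(usec);           /* the driver busy-waits with udelay() */
                if (usec < 2048)        /* same 2048 us ceiling as the hunk above */
                        usec *= 2;
        }
        return -1;
}

int main(void)
{
        return post_with_backoff();
}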
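The hv_balloon.c hunks all serve one idea: a hot-add request may not start where the region's covered range currently ends, so the unbacked hole is recorded as a struct hv_hotadd_gap, and the online path (hv_page_online_one()) refuses to online any pfn outside the covered range or inside a recorded gap. A self-contained userspace sketch of that bookkeeping — simplified structs and a singly linked gap list instead of the kernel's list_head, so an illustration of the idea rather than the driver code:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct gap {                    /* cf. struct hv_hotadd_gap */
        unsigned long start_pfn;
        unsigned long end_pfn;  /* one past the last unbacked pfn */
        struct gap *next;
};

struct region {                 /* cf. struct hv_hotadd_state */
        unsigned long covered_start_pfn;
        unsigned long covered_end_pfn;
        struct gap *gaps;
};

/* Record a hole when a hot-add request does not start where the
 * covered range ends (cf. pfn_covered() above); here the covered
 * end also advances by the whole chunk, whereas the driver advances
 * it incrementally as pages actually come online. */
static int add_chunk(struct region *r, unsigned long start, unsigned long cnt)
{
        if (r->covered_end_pfn != start) {
                struct gap *g = calloc(1, sizeof(*g));

                if (!g)
                        return -1; /* the driver returns -ENOMEM */
                g->start_pfn = r->covered_end_pfn;
                g->end_pfn = start;
                g->next = r->gaps;
                r->gaps = g;
        }
        r->covered_end_pfn = start + cnt;
        return 0;
}

/* A pfn may be onlined only if it lies in the covered range and in
 * no recorded gap (cf. hv_page_online_one() above). */
static bool pfn_backed(const struct region *r, unsigned long pfn)
{
        const struct gap *g;

        if (pfn < r->covered_start_pfn || pfn >= r->covered_end_pfn)
                return false;
        for (g = r->gaps; g; g = g->next)
                if (pfn >= g->start_pfn && pfn < g->end_pfn)
                        return false;
        return true;
}

int main(void)
{
        struct region r = { .covered_start_pfn = 100, .covered_end_pfn = 100 };

        add_chunk(&r, 100, 50); /* pfns 100..149 backed */
        add_chunk(&r, 180, 20); /* hole 150..179 recorded as a gap */

        printf("pfn 120 backed: %d\n", pfn_backed(&r, 120)); /* 1 */
        printf("pfn 160 backed: %d\n", pfn_backed(&r, 160)); /* 0 */
        printf("pfn 190 backed: %d\n", pfn_backed(&r, 190)); /* 1 */
        return 0;
}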