author     Linus Torvalds <torvalds@linux-foundation.org>  2015-05-06 15:58:06 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-05-06 15:58:06 -0700
commit     0e1dc4274828f64fcb56fc7b950acdc5ff7a395f (patch)
tree       0855a6e189dede21e9e2dd0094774089b1c7d8d2 /drivers/block
parent     3d54ac9e35a69d19381420bb2fa1702d5bf73846 (diff)
parent     8746515d7f04c9ea94cf43e2db1fd2cfca93276d (diff)
Merge tag 'for-linus-4.1b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen bug fixes from David Vrabel:
- fix blkback regression if using persistent grants
- fix various event channel related suspend/resume bugs
- fix AMD x86 regression with X86_BUG_SYSRET_SS_ATTRS
- SWIOTLB on ARM now uses frames below 4 GiB (if available), so devices
  that are only capable of 32-bit DMA work (see the sketch below).
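
For context on the last item: keeping the swiotlb bounce buffer below 4 GiB is done by allocating it from ZONE_DMA. The following is a minimal, hypothetical sketch of that allocation pattern, not the actual swiotlb-xen change; the function name, the order parameter, and the surrounding error handling are illustrative only.

#include <linux/gfp.h>

/*
 * Illustrative sketch: allocate a bounce buffer from ZONE_DMA so its
 * frames sit below 4 GiB and stay reachable by 32-bit DMA masters.
 * With plain GFP_KERNEL, pages on an LPAE ARM system may land above 4 GiB.
 */
static void *alloc_dma32_bounce_buffer(unsigned int order)
{
        /* __GFP_DMA restricts the allocation to ZONE_DMA. */
        return (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_DMA,
                                        order);
}

Whether ZONE_DMA exists, and how much memory it covers, is platform dependent, which is why the summary above says "if available".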
* tag 'for-linus-4.1b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen: Add __GFP_DMA flag when xen_swiotlb_init gets free pages on ARM
hypervisor/x86/xen: Unset X86_BUG_SYSRET_SS_ATTRS on Xen PV guests
xen/events: Set irq_info->evtchn before binding the channel to CPU in __startup_pirq()
xen/console: Update console event channel on resume
xen/xenbus: Update xenbus event channel on resume
xen/events: Clear cpu_evtchn_mask before resuming
xen-pciback: Add name prefix to global 'permissive' variable
xen: Suspend ticks on all CPUs during suspend
xen/grant: introduce func gnttab_unmap_refs_sync()
xen/blkback: safely unmap purge persistent grants
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/xen-blkback/blkback.c | 35 |
1 file changed, 11 insertions, 24 deletions
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index bd2b3bbbb22c..713fc9ff1149 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -265,17 +265,6 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
 	atomic_dec(&blkif->persistent_gnt_in_use);
 }
 
-static void free_persistent_gnts_unmap_callback(int result,
-						struct gntab_unmap_queue_data *data)
-{
-	struct completion *c = data->data;
-
-	/* BUG_ON used to reproduce existing behaviour,
-	   but is this the best way to deal with this? */
-	BUG_ON(result);
-	complete(c);
-}
-
 static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                  unsigned int num)
 {
@@ -285,12 +274,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 	struct rb_node *n;
 	int segs_to_unmap = 0;
 	struct gntab_unmap_queue_data unmap_data;
-	struct completion unmap_completion;
 
-	init_completion(&unmap_completion);
-
-	unmap_data.data = &unmap_completion;
-	unmap_data.done = &free_persistent_gnts_unmap_callback;
 	unmap_data.pages = pages;
 	unmap_data.unmap_ops = unmap;
 	unmap_data.kunmap_ops = NULL;
@@ -310,8 +294,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 		    !rb_next(&persistent_gnt->node)) {
 
 			unmap_data.count = segs_to_unmap;
-			gnttab_unmap_refs_async(&unmap_data);
-			wait_for_completion(&unmap_completion);
+			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
@@ -329,8 +312,13 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct persistent_gnt *persistent_gnt;
-	int ret, segs_to_unmap = 0;
+	int segs_to_unmap = 0;
 	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
+	struct gntab_unmap_queue_data unmap_data;
+
+	unmap_data.pages = pages;
+	unmap_data.unmap_ops = unmap;
+	unmap_data.kunmap_ops = NULL;
 
 	while(!list_empty(&blkif->persistent_purge_list)) {
 		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
@@ -346,17 +334,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
 		pages[segs_to_unmap] = persistent_gnt->page;
 
 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, NULL, pages,
-				segs_to_unmap);
-			BUG_ON(ret);
+			unmap_data.count = segs_to_unmap;
+			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
 		}
 		kfree(persistent_gnt);
 	}
 	if (segs_to_unmap > 0) {
-		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
-		BUG_ON(ret);
+		unmap_data.count = segs_to_unmap;
+		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 		put_free_pages(blkif, pages, segs_to_unmap);
 	}
 }
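
For reference, gnttab_unmap_refs_sync() (introduced by the "xen/grant" commit pulled in above and called from the blkback code in this diff) is essentially a synchronous wrapper around gnttab_unmap_refs_async(), built on a completion, the same pattern the deleted blkback callback open-coded. The following is a sketch of that shape; the helper-struct and callback names are illustrative and not necessarily the exact upstream ones.

#include <linux/completion.h>
#include <xen/grant_table.h>

/* Illustrative: carries the completion and the result back from the callback. */
struct unmap_refs_callback_data {
        struct completion completion;
        int result;
};

static void unmap_refs_callback(int result,
                                struct gntab_unmap_queue_data *data)
{
        struct unmap_refs_callback_data *d = data->data;

        d->result = result;
        complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
        struct unmap_refs_callback_data data;

        init_completion(&data.completion);
        item->data = &data;
        item->done = &unmap_refs_callback;
        gnttab_unmap_refs_async(item);
        wait_for_completion(&data.completion);

        return data.result;
}

This lets callers such as blkback replace the open-coded async-unmap-plus-wait sequence with a single call whose return value can be checked, here via BUG_ON().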