author    K. Y. Srinivasan <kys@microsoft.com>    2012-12-01 06:46:36 -0800
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2013-01-17 10:46:39 -0800
commit    98fa8cf4bcc79cb14de8fd815bbcd00dcbd7b20e (patch)
tree      0d48abd179e49709cdac7e28eb853a653df7fbe2 /drivers/hv
parent    f878f3d59ed26f489add852ed6d5c8e5f3bbb1aa (diff)
Drivers: hv: Optimize the signaling on the write path
The host has already implemented the "read" side optimizations. Leverage that to optimize "write" side signaling.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
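To make the change concrete before reading the diff: after this patch the guest signals the host only when a write takes the ring buffer from empty to non-empty, and never while the host has set interrupt_mask (the host is still draining and will re-check for new data on its own). The snippet below is a simplified, stand-alone model of that decision; struct ring_state and need_to_signal() are made-up names used only for illustration, while the real logic is hv_need_to_signal() in ring_buffer.c below.

	/*
	 * Minimal user-space model of the write-path signaling rule added
	 * by this patch. Types and names are simplified stand-ins; only
	 * the decision logic mirrors the kernel code.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct ring_state {
		uint32_t read_index;     /* next offset the host will read           */
		uint32_t write_index;    /* next offset the guest will write         */
		uint32_t interrupt_mask; /* set by the host while it drains the ring */
	};

	/* Signal only when the write takes the ring from empty to non-empty. */
	static bool need_to_signal(uint32_t old_write, const struct ring_state *rb)
	{
		if (rb->interrupt_mask)
			return false;            /* host is already draining */

		return old_write == rb->read_index;
	}

	int main(void)
	{
		struct ring_state rb = { .read_index = 0, .write_index = 0 };

		for (int i = 0; i < 3; i++) {
			uint32_t old_write = rb.write_index;

			rb.write_index += 64;    /* pretend a 64-byte packet was copied in */
			printf("write %d: signal host? %s\n", i,
			       need_to_signal(old_write, &rb) ? "yes" : "no");
		}
		/* Only the first write signals; the host drains the rest itself. */
		return 0;
	}

This is what lets the write path skip vmbus_setevent() in the common case where the host is already busy processing the ring.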
Diffstat (limited to 'drivers/hv')
-rw-r--r--  drivers/hv/channel.c       15
-rw-r--r--  drivers/hv/hyperv_vmbus.h   2
-rw-r--r--  drivers/hv/ring_buffer.c   42
3 files changed, 49 insertions(+), 10 deletions(-)
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 773a2f25a8f0..727c5f1d6acf 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -564,6 +564,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
int ret;
+ bool signal = false;
/* Setup the descriptor */
@@ -580,9 +581,9 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -606,6 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
+ bool signal = false;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;
@@ -641,9 +643,9 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -665,6 +667,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
+ bool signal = false;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -703,9 +706,9 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 3184f6ff4e74..895c898812bd 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -555,7 +555,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
struct scatterlist *sglist,
- u32 sgcount);
+ u32 sgcount, bool *signal);
int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
u32 buflen);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 001079287709..9bc55f0dcf47 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -53,6 +53,37 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
return read;
}
+/*
+ * When we write to the ring buffer, check if the host needs to
+ * be signaled. Here are the details of this protocol:
+ *
+ * 1. The host guarantees that while it is draining the
+ * ring buffer, it will set the interrupt_mask to
+ * indicate it does not need to be interrupted when
+ * new data is placed.
+ *
+ * 2. The host guarantees that it will completely drain
+ * the ring buffer before exiting the read loop. Further,
+ * once the ring buffer is empty, it will clear the
+ * interrupt_mask and re-check to see if new data has
+ * arrived.
+ */
+
+static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+{
+ if (rbi->ring_buffer->interrupt_mask)
+ return false;
+
+ /*
+ * This is the only case we need to signal: when the
+ * ring transitions from being empty to non-empty.
+ */
+ if (old_write == rbi->ring_buffer->read_index)
+ return true;
+
+ return false;
+}
+
/*
* hv_get_next_write_location()
@@ -322,7 +353,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
*
*/
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct scatterlist *sglist, u32 sgcount)
+ struct scatterlist *sglist, u32 sgcount, bool *signal)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -331,6 +362,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
struct scatterlist *sg;
u32 next_write_location;
+ u32 old_write;
u64 prev_indices = 0;
unsigned long flags;
@@ -359,6 +391,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
/* Write to the ring buffer */
next_write_location = hv_get_next_write_location(outring_info);
+ old_write = next_write_location;
+
for_each_sg(sglist, sg, sgcount, i)
{
next_write_location = hv_copyto_ringbuffer(outring_info,
@@ -375,14 +409,16 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
&prev_indices,
sizeof(u64));
- /* Make sure we flush all writes before updating the writeIndex */
- smp_wmb();
+ /* Issue a full memory barrier before updating the write index */
+ smp_mb();
/* Now, update the write location */
hv_set_next_write_location(outring_info, next_write_location);
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+ *signal = hv_need_to_signal(old_write, outring_info);
return 0;
}
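For reference, all three send paths in channel.c (vmbus_sendpacket, vmbus_sendpacket_pagebuffer and vmbus_sendpacket_multipagebuffer) now end with the same pattern shown in the hunks above; it is paraphrased here with the identifiers from the patch, not as stand-alone code:

	bool signal = false;
	int ret;

	/* hv_ringbuffer_write() reports, via 'signal', whether this write
	 * took the ring from empty to non-empty with interrupt_mask clear. */
	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	/* Interrupt the host only when it actually needs a wakeup. */
	if (ret == 0 && signal)
		vmbus_setevent(channel);

	return ret;

Compared with the old test on hv_get_ringbuffer_interrupt_mask(), the decision now also accounts for whether the ring was already non-empty, so the host is no longer signaled for every packet queued while it is still draining the buffer.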