Drivers: hv: ring_buffer: use wrap around mappings in hv_copy{from, to}_ringbuffer()
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index fe586bf..08043da 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -27,6 +27,8 @@
 #include <linux/mm.h>
 #include <linux/hyperv.h>
 #include <linux/uio.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
 
 #include "hyperv_vmbus.h"
 
@@ -66,12 +68,20 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
  *        arrived.
  */
 
-static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
+                             enum hv_signal_policy policy)
 {
        virt_mb();
        if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
                return false;
 
+       /*
+        * When the client wants to control signaling,
+        * we only honour the host interrupt mask.
+        */
+       if (policy == HV_SIGNAL_POLICY_EXPLICIT)
+               return true;
+
        /* check interrupt_mask before read_index */
        virt_rmb();
        /*
@@ -162,18 +172,7 @@ static u32 hv_copyfrom_ringbuffer(
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
 
-       u32 frag_len;
-
-       /* wrap-around detected at the src */
-       if (destlen > ring_buffer_size - start_read_offset) {
-               frag_len = ring_buffer_size - start_read_offset;
-
-               memcpy(dest, ring_buffer + start_read_offset, frag_len);
-               memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
-       } else
-
-               memcpy(dest, ring_buffer + start_read_offset, destlen);
-
+       memcpy(dest, ring_buffer + start_read_offset, destlen);
 
        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;
@@ -194,15 +193,8 @@ static u32 hv_copyto_ringbuffer(
 {
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
-       u32 frag_len;
 
-       /* wrap-around detected! */
-       if (srclen > ring_buffer_size - start_write_offset) {
-               frag_len = ring_buffer_size - start_write_offset;
-               memcpy(ring_buffer + start_write_offset, src, frag_len);
-               memcpy(ring_buffer, src + frag_len, srclen - frag_len);
-       } else
-               memcpy(ring_buffer + start_write_offset, src, srclen);
+       memcpy(ring_buffer + start_write_offset, src, srclen);
 
        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;
@@ -235,22 +227,46 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
 
 /* Initialize the ring buffer. */
 int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
-                  void *buffer, u32 buflen)
+                      struct page *pages, u32 page_cnt)
 {
-       if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
-               return -EINVAL;
+       int i;
+       struct page **pages_wraparound;
+
+       BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
 
        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));
 
-       ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
+       /*
+        * First page holds struct hv_ring_buffer, do wraparound mapping for
+        * the rest.
+        */
+       pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
+                                  GFP_KERNEL);
+       if (!pages_wraparound)
+               return -ENOMEM;
+
+       pages_wraparound[0] = pages;
+       for (i = 0; i < 2 * (page_cnt - 1); i++)
+               pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
+
+       ring_info->ring_buffer = (struct hv_ring_buffer *)
+               vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);
+
+       kfree(pages_wraparound);
+
+
+       if (!ring_info->ring_buffer)
+               return -ENOMEM;
+
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;
 
        /* Set the feature bit for enabling flow control. */
        ring_info->ring_buffer->feature_bits.value = 1;
 
-       ring_info->ring_size = buflen;
-       ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);
+       ring_info->ring_size = page_cnt << PAGE_SHIFT;
+       ring_info->ring_datasize = ring_info->ring_size -
+               sizeof(struct hv_ring_buffer);
 
        spin_lock_init(&ring_info->ring_lock);
 
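A worked example of the wraparound array built in this hunk, assuming page_cnt = 3 (one header page p0 and two data pages p1, p2); the concrete page count is only illustrative, the index arithmetic is copied from the loop above:

	pages_wraparound[0] = &pages[0];   /* p0: header page, mapped once  */
	pages_wraparound[1] = &pages[1];   /* i = 0: 0 % 2 + 1 = 1 -> p1    */
	pages_wraparound[2] = &pages[2];   /* i = 1: 1 % 2 + 1 = 2 -> p2    */
	pages_wraparound[3] = &pages[1];   /* i = 2: 2 % 2 + 1 = 1 -> p1    */
	pages_wraparound[4] = &pages[2];   /* i = 3: 3 % 2 + 1 = 2 -> p2    */

vmap() then lays these five pages out contiguously as p0 p1 p2 p1 p2, so an access into the data area that runs past p2 simply continues in the second mapping of p1. This is what allows hv_copyfrom_ringbuffer() and hv_copyto_ringbuffer() above to use a single memcpy() with no wrap-around handling.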
@@ -260,11 +276,13 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 /* Cleanup the ring buffer. */
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 {
+       vunmap(ring_info->ring_buffer);
 }
 
 /* Write to the ring buffer. */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-                   struct kvec *kv_list, u32 kv_count, bool *signal, bool lock)
+                   struct kvec *kv_list, u32 kv_count, bool *signal, bool lock,
+                   enum hv_signal_policy policy)
 {
        int i = 0;
        u32 bytes_avail_towrite;
@@ -326,7 +344,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
        if (lock)
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 
-       *signal = hv_need_to_signal(old_write, outring_info);
+       *signal = hv_need_to_signal(old_write, outring_info, policy);
        return 0;
 }
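For readers unfamiliar with the trick, the stand-alone user-space sketch below shows the same wraparound-mapping idea using memfd_create() and two MAP_FIXED mmap()s of the same backing pages. It only illustrates the technique; it is not code from this patch, and the names and sizes in it are made up.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	size_t ring_size = 4096;		/* one page of ring data */
	int fd = memfd_create("ring", 0);	/* anonymous backing "file" */

	if (fd < 0 || ftruncate(fd, ring_size) < 0)
		return 1;

	/*
	 * Reserve 2 * ring_size of address space, then map the same
	 * backing pages into both halves, back to back.
	 */
	char *base = mmap(NULL, 2 * ring_size, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return 1;
	mmap(base, ring_size, PROT_READ | PROT_WRITE,
	     MAP_SHARED | MAP_FIXED, fd, 0);
	mmap(base + ring_size, ring_size, PROT_READ | PROT_WRITE,
	     MAP_SHARED | MAP_FIXED, fd, 0);

	/*
	 * A copy starting 8 bytes before the end of the ring no longer
	 * needs to be split: it runs into the second mapping, which
	 * aliases the start of the ring.
	 */
	memcpy(base + ring_size - 8, "wraparound test", 16);

	printf("%s\n", base);			/* prints "nd test" */

	munmap(base, 2 * ring_size);
	close(fd);
	return 0;
}

The kernel side of this patch gets the same layout with vmap() on the pages_wraparound array: the ring occupies page_cnt physical pages, but the data pages are visible twice in a row in the virtual mapping.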