xen/blkback: correctly respond to unknown, non-native requests
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 74374fb..1a0faf6 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -46,6 +46,7 @@
 #include <xen/xen.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
+#include <xen/balloon.h>
 #include "common.h"
 
 /*
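The new include is what declares the ballooned-page allocator used by the persistent-grant hunks below. For reference, the two declarations it brings in (signatures as of this era's include/xen/balloon.h):

    int alloc_xenballooned_pages(int nr_pages, struct page **pages,
                                 bool highmem);
    void free_xenballooned_pages(int nr_pages, struct page **pages);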
@@ -161,10 +162,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st);
 
-#define foreach_grant(pos, rbtree, node) \
-       for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \
+#define foreach_grant_safe(pos, n, rbtree, node) \
+       for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
+            (n) = rb_next(&(pos)->node); \
             &(pos)->node != NULL; \
-            (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node))
+            (pos) = container_of(n, typeof(*(pos)), node), \
+            (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
 
 
 static void add_persistent_gnt(struct rb_root *root,
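The old foreach_grant() computed the next node from pos after the loop body had run, so a body that erases and frees pos ended up calling rb_next() on freed memory. The _safe variant caches the successor up front, in the spirit of list_for_each_entry_safe(). A minimal usage sketch (the persistent_gnts rb_root field name is an assumption of the sketch):

    struct persistent_gnt *pos;
    struct rb_node *n;

    foreach_grant_safe(pos, n, &blkif->persistent_gnts, node) {
            /* safe: the successor n was fetched before this body ran */
            rb_erase(&pos->node, &blkif->persistent_gnts);
            kfree(pos);
    }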
@@ -217,10 +220,11 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
+       struct rb_node *n;
        int ret = 0;
        int segs_to_unmap = 0;
 
-       foreach_grant(persistent_gnt, root, node) {
+       foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
@@ -230,17 +234,19 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
                        persistent_gnt->handle);
 
                pages[segs_to_unmap] = persistent_gnt->page;
-               rb_erase(&persistent_gnt->node, root);
-               kfree(persistent_gnt);
-               num--;
 
                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                        !rb_next(&persistent_gnt->node)) {
                        ret = gnttab_unmap_refs(unmap, NULL, pages,
                                segs_to_unmap);
                        BUG_ON(ret);
+                       free_xenballooned_pages(segs_to_unmap, pages);
                        segs_to_unmap = 0;
                }
+
+               rb_erase(&persistent_gnt->node, root);
+               kfree(persistent_gnt);
+               num--;
        }
        BUG_ON(num != 0);
 }
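Two fixes meet in this function. Pages backing persistent grants are now ballooned pages, so after the batched gnttab_unmap_refs() they must go back via free_xenballooned_pages() or they leak. And the batch-flush test uses rb_next(&persistent_gnt->node) to detect the final entry, which is only meaningful while the node is still linked, so rb_erase()/kfree() move after the flush instead of before it. The loop reduced to a sketch, with flush() a hypothetical stand-in for the unmap-plus-free pair:

    foreach_grant_safe(gnt, n, root, node) {
            batch[cnt++] = gnt->page;
            if (cnt == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                !rb_next(&gnt->node)) {         /* node must still be linked */
                    flush(batch, cnt);
                    cnt = 0;
            }
            rb_erase(&gnt->node, root);         /* unlink and free only now */
            kfree(gnt);
    }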
@@ -523,8 +529,8 @@ static int xen_blkbk_map(struct blkif_request *req,
                                GFP_KERNEL);
                        if (!persistent_gnt)
                                return -ENOMEM;
-                       persistent_gnt->page = alloc_page(GFP_KERNEL);
-                       if (!persistent_gnt->page) {
+                       if (alloc_xenballooned_pages(1, &persistent_gnt->page,
+                           false)) {
                                kfree(persistent_gnt);
                                return -ENOMEM;
                        }
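Persistent grants live for the lifetime of the connection, so backing each one with alloc_page() pins a populated local frame per grant. Mapping a foreign grant replaces the page's backing frame anyway, which makes ballooned pages (frames the backend has already relinquished) the natural target; the trailing false declines highmem pages. The lifecycle as a sketch, error handling trimmed:

    struct page *page;

    if (alloc_xenballooned_pages(1, &page, false))
            return -ENOMEM;
    /* ... grant-map the frontend's frame onto page, reuse across requests ... */
    /* on teardown, after gnttab_unmap_refs(): */
    free_xenballooned_pages(1, &page);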
@@ -673,6 +679,16 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
        return err;
 }
 
+static int dispatch_other_io(struct xen_blkif *blkif,
+                            struct blkif_request *req,
+                            struct pending_req *pending_req)
+{
+       free_req(pending_req);
+       make_response(blkif, req->u.other.id, req->operation,
+                     BLKIF_RSP_EOPNOTSUPP);
+       return -EIO;
+}
+
 static void xen_blk_drain_io(struct xen_blkif *blkif)
 {
        atomic_set(&blkif->drain, 1);
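This is the heart of the fix named in the subject: a request whose operation blkback does not recognize now gets its pending_req freed and a BLKIF_RSP_EOPNOTSUPP response carrying the guest's id, and the -EIO return tells the ring loop to stop draining a producer that is feeding it garbage. The id is read through req->u.other.id because the backend cannot assume the rw layout for an unknown request type; the companion interface change (not part of this file) adds a padded union member so the id is found at the right offset for both native and 32-bit frontends, roughly:

    struct blkif_request_other {
            uint8_t      _pad1;
            blkif_vdev_t _pad2;        /* only used by read/write */
    #ifdef CONFIG_X86_64
            uint32_t     _pad3;        /* offsetof(..., u.other.id) == 8 */
    #endif
            uint64_t     id;           /* private guest value, echoed in resp */
    } __attribute__((__packed__));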
@@ -794,17 +810,30 @@ __do_block_io_op(struct xen_blkif *blkif)
 
                /* Apply all sanity checks to /private copy/ of request. */
                barrier();
-               if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
+
+               switch (req.operation) {
+               case BLKIF_OP_READ:
+               case BLKIF_OP_WRITE:
+               case BLKIF_OP_WRITE_BARRIER:
+               case BLKIF_OP_FLUSH_DISKCACHE:
+                       if (dispatch_rw_block_io(blkif, &req, pending_req))
+                               goto done;
+                       break;
+               case BLKIF_OP_DISCARD:
                        free_req(pending_req);
                        if (dispatch_discard_io(blkif, &req))
-                               break;
-               } else if (dispatch_rw_block_io(blkif, &req, pending_req))
+                               goto done;
                        break;
+               default:
+                       if (dispatch_other_io(blkif, &req, pending_req))
+                               goto done;
+                       break;
+               }
 
                /* Yield point for this unbounded loop. */
                cond_resched();
        }
-
+done:
        return more_to_do;
 }
 
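The switch whitelists the operations blkback implements and routes everything else to dispatch_other_io(); any dispatch failure takes the new done label, so one bad request stops consumption of the ring rather than being silently skipped. For reference, the ring ABI opcodes being switched on (from the blkif interface header; any other value, including the reserved 4, lands in default):

    #define BLKIF_OP_READ              0
    #define BLKIF_OP_WRITE             1
    #define BLKIF_OP_WRITE_BARRIER     2
    #define BLKIF_OP_FLUSH_DISKCACHE   3
    #define BLKIF_OP_DISCARD           5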
@@ -875,7 +904,6 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                goto fail_response;
        }
 
-       preq.dev           = req->u.rw.handle;
        preq.sector_number = req->u.rw.sector_number;
        preq.nr_sects      = 0;
 
@@ -899,7 +927,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
                         operation == READ ? "read" : "write",
                         preq.sector_number,
-                        preq.sector_number + preq.nr_sects, preq.dev);
+                        preq.sector_number + preq.nr_sects,
+                        blkif->vbd.pdevice);
                goto fail_response;
        }