drivers/block/xen-blkback/blkback.c
1 /******************************************************************************
2  *
3  * Back-end of the driver for virtual block devices. This portion of the
4  * driver exports a 'unified' block-device interface that can be accessed
5  * by any operating system that implements a compatible front end. A
6  * reference front-end implementation can be found in:
7  *  drivers/block/xen-blkfront.c
8  *
9  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
10  * Copyright (c) 2005, Christopher Clark
11  *
12  * This program is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU General Public License version 2
14  * as published by the Free Software Foundation; or, when distributed
15  * separately from the Linux kernel or incorporated into other
16  * software packages, subject to the following license:
17  *
18  * Permission is hereby granted, free of charge, to any person obtaining a copy
19  * of this source file (the "Software"), to deal in the Software without
20  * restriction, including without limitation the rights to use, copy, modify,
21  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22  * and to permit persons to whom the Software is furnished to do so, subject to
23  * the following conditions:
24  *
25  * The above copyright notice and this permission notice shall be included in
26  * all copies or substantial portions of the Software.
27  *
28  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34  * IN THE SOFTWARE.
35  */
36
37 #include <linux/spinlock.h>
38 #include <linux/kthread.h>
39 #include <linux/list.h>
40 #include <linux/delay.h>
41 #include <linux/freezer.h>
42 #include <linux/bitmap.h>
43
44 #include <xen/events.h>
45 #include <xen/page.h>
46 #include <xen/xen.h>
47 #include <asm/xen/hypervisor.h>
48 #include <asm/xen/hypercall.h>
49 #include "common.h"
50
51 /*
52  * These are rather arbitrary. They are fairly large because adjacent requests
53  * pulled from a communication ring are quite likely to end up being part of
54  * the same scatter/gather request at the disc.
55  *
56  * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
57  *
58  * This will increase the chances of being able to write whole tracks.
59  * 64 should be enough to keep us competitive with Linux.
60  */
61 static int xen_blkif_reqs = 64;
62 module_param_named(reqs, xen_blkif_reqs, int, 0);
63 MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
64
65 /* Run-time switchable: /sys/module/xen_blkback/parameters/ */
66 static int log_stats;
67 module_param(log_stats, int, 0644);
68
69 /*
70  * Each outstanding request that we've passed to the lower device layers has a
71  * 'pending_req' allocated to it. Each bio that completes decrements
72  * the pendcnt towards zero. When it hits zero, the specified domain has a
73  * response queued for it, with the saved 'id' passed back.
74  */
75 struct pending_req {
76         struct xen_blkif        *blkif;
77         u64                     id;
78         int                     nr_pages;
79         atomic_t                pendcnt;
80         unsigned short          operation;
81         int                     status;
82         struct list_head        free_list;
83         DECLARE_BITMAP(unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
84 };
85
86 #define BLKBACK_INVALID_HANDLE (~0)
87
88 struct xen_blkbk {
89         struct pending_req      *pending_reqs;
90         /* List of all 'pending_req' available */
91         struct list_head        pending_free;
92         /* And its spinlock. */
93         spinlock_t              pending_free_lock;
94         wait_queue_head_t       pending_free_wq;
95         /* The list of all pages that are available. */
96         struct page             **pending_pages;
97         /* And the grant handles that are available. */
98         grant_handle_t          *pending_grant_handles;
99 };
100
101 static struct xen_blkbk *blkbk;
102
103 /*
104  * Maximum number of grant pages that can be mapped in blkback.
105  * BLKIF_MAX_SEGMENTS_PER_REQUEST * RING_SIZE is the maximum number of
106  * pages that blkback will persistently map.
107  * Currently, this is:
108  * RING_SIZE = 32 (for all known ring types)
109  * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11
110  * sizeof(struct persistent_gnt) = 48
111  * So the maximum memory used to store the grants is:
112  * 32 * 11 * 48 = 16896 bytes
113  */
114 static inline unsigned int max_mapped_grant_pages(enum blkif_protocol protocol)
115 {
116         switch (protocol) {
117         case BLKIF_PROTOCOL_NATIVE:
118                 return __CONST_RING_SIZE(blkif, PAGE_SIZE) *
119                            BLKIF_MAX_SEGMENTS_PER_REQUEST;
120         case BLKIF_PROTOCOL_X86_32:
121                 return __CONST_RING_SIZE(blkif_x86_32, PAGE_SIZE) *
122                            BLKIF_MAX_SEGMENTS_PER_REQUEST;
123         case BLKIF_PROTOCOL_X86_64:
124                 return __CONST_RING_SIZE(blkif_x86_64, PAGE_SIZE) *
125                            BLKIF_MAX_SEGMENTS_PER_REQUEST;
126         default:
127                 BUG();
128         }
129         return 0;
130 }
131
132
133 /*
134  * Little helper to figure out the index and virtual address of the
135  * pending_pages[..]. For each 'pending_req' we have up to
136  * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg ranges from 0 through
137  * 10 and indexes into pending_pages[..].
138  */
139 static inline int vaddr_pagenr(struct pending_req *req, int seg)
140 {
141         return (req - blkbk->pending_reqs) *
142                 BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
143 }
144
145 #define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
146
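/* Kernel virtual address of the pending page backing segment 'seg' of 'req'. */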
147 static inline unsigned long vaddr(struct pending_req *req, int seg)
148 {
149         unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
150         return (unsigned long)pfn_to_kaddr(pfn);
151 }
152
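/* Grant handle recorded for segment '_seg' of pending request '_req'. */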
153 #define pending_handle(_req, _seg) \
154         (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
155
156
157 static int do_block_io_op(struct xen_blkif *blkif);
158 static int dispatch_rw_block_io(struct xen_blkif *blkif,
159                                 struct blkif_request *req,
160                                 struct pending_req *pending_req);
161 static void make_response(struct xen_blkif *blkif, u64 id,
162                           unsigned short op, int st);
163
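/*
 * Walk an rb-tree of persistent grants, fetching the next node before the
 * loop body runs so that the current node can safely be erased and freed.
 */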
164 #define foreach_grant_safe(pos, n, rbtree, node) \
165         for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
166              (n) = rb_next(&(pos)->node); \
167              &(pos)->node != NULL; \
168              (pos) = container_of(n, typeof(*(pos)), node), \
169              (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
170
171
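/*
 * Insert a persistently mapped grant into the rb-tree, keyed by its grant
 * reference. Inserting a gref that is already in the tree is a bug.
 */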
172 static void add_persistent_gnt(struct rb_root *root,
173                                struct persistent_gnt *persistent_gnt)
174 {
175         struct rb_node **new = &(root->rb_node), *parent = NULL;
176         struct persistent_gnt *this;
177
178         /* Figure out where to put new node */
179         while (*new) {
180                 this = container_of(*new, struct persistent_gnt, node);
181
182                 parent = *new;
183                 if (persistent_gnt->gnt < this->gnt)
184                         new = &((*new)->rb_left);
185                 else if (persistent_gnt->gnt > this->gnt)
186                         new = &((*new)->rb_right);
187                 else {
188                         pr_alert(DRV_PFX " trying to add a gref that's already in the tree\n");
189                         BUG();
190                 }
191         }
192
193         /* Add new node and rebalance tree. */
194         rb_link_node(&(persistent_gnt->node), parent, new);
195         rb_insert_color(&(persistent_gnt->node), root);
196 }
197
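/*
 * Look up a persistently mapped grant by grant reference. Returns NULL if
 * the gref has not been persistently mapped yet.
 */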
198 static struct persistent_gnt *get_persistent_gnt(struct rb_root *root,
199                                                  grant_ref_t gref)
200 {
201         struct persistent_gnt *data;
202         struct rb_node *node = root->rb_node;
203
204         while (node) {
205                 data = container_of(node, struct persistent_gnt, node);
206
207                 if (gref < data->gnt)
208                         node = node->rb_left;
209                 else if (gref > data->gnt)
210                         node = node->rb_right;
211                 else
212                         return data;
213         }
214         return NULL;
215 }
216
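/*
 * Unmap and free all persistent grants in the tree, batching the unmap
 * operations in groups of up to BLKIF_MAX_SEGMENTS_PER_REQUEST.
 */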
217 static void free_persistent_gnts(struct rb_root *root, unsigned int num)
218 {
219         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
220         struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
221         struct persistent_gnt *persistent_gnt;
222         struct rb_node *n;
223         int ret = 0;
224         int segs_to_unmap = 0;
225
226         foreach_grant_safe(persistent_gnt, n, root, node) {
227                 BUG_ON(persistent_gnt->handle ==
228                         BLKBACK_INVALID_HANDLE);
229                 gnttab_set_unmap_op(&unmap[segs_to_unmap],
230                         (unsigned long) pfn_to_kaddr(page_to_pfn(
231                                 persistent_gnt->page)),
232                         GNTMAP_host_map,
233                         persistent_gnt->handle);
234
235                 pages[segs_to_unmap] = persistent_gnt->page;
236
237                 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
238                         !rb_next(&persistent_gnt->node)) {
239                         ret = gnttab_unmap_refs(unmap, NULL, pages,
240                                 segs_to_unmap);
241                         BUG_ON(ret);
242                         segs_to_unmap = 0;
243                 }
244
245                 rb_erase(&persistent_gnt->node, root);
246                 kfree(persistent_gnt);
247                 num--;
248         }
249         BUG_ON(num != 0);
250 }
251
252 /*
253  * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
254  */
255 static struct pending_req *alloc_req(void)
256 {
257         struct pending_req *req = NULL;
258         unsigned long flags;
259
260         spin_lock_irqsave(&blkbk->pending_free_lock, flags);
261         if (!list_empty(&blkbk->pending_free)) {
262                 req = list_entry(blkbk->pending_free.next, struct pending_req,
263                                  free_list);
264                 list_del(&req->free_list);
265         }
266         spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
267         return req;
268 }
269
270 /*
271  * Return the 'pending_req' structure back to the free pool. We also
272  * wake up the thread if it was waiting for a free request.
273  */
274 static void free_req(struct pending_req *req)
275 {
276         unsigned long flags;
277         int was_empty;
278
279         spin_lock_irqsave(&blkbk->pending_free_lock, flags);
280         was_empty = list_empty(&blkbk->pending_free);
281         list_add(&req->free_list, &blkbk->pending_free);
282         spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
283         if (was_empty)
284                 wake_up(&blkbk->pending_free_wq);
285 }
286
287 /*
288  * Routines for managing virtual block devices (vbds).
289  */
290 static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
291                              int operation)
292 {
293         struct xen_vbd *vbd = &blkif->vbd;
294         int rc = -EACCES;
295
296         if ((operation != READ) && vbd->readonly)
297                 goto out;
298
299         if (likely(req->nr_sects)) {
300                 blkif_sector_t end = req->sector_number + req->nr_sects;
301
302                 if (unlikely(end < req->sector_number))
303                         goto out;
304                 if (unlikely(end > vbd_sz(vbd)))
305                         goto out;
306         }
307
308         req->dev  = vbd->pdevice;
309         req->bdev = vbd->bdev;
310         rc = 0;
311
312  out:
313         return rc;
314 }
315
316 static void xen_vbd_resize(struct xen_blkif *blkif)
317 {
318         struct xen_vbd *vbd = &blkif->vbd;
319         struct xenbus_transaction xbt;
320         int err;
321         struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
322         unsigned long long new_size = vbd_sz(vbd);
323
324         pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
325                 blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
326         pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
327         vbd->size = new_size;
328 again:
329         err = xenbus_transaction_start(&xbt);
330         if (err) {
331                 pr_warn(DRV_PFX "Error starting transaction\n");
332                 return;
333         }
334         err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
335                             (unsigned long long)vbd_sz(vbd));
336         if (err) {
337                 pr_warn(DRV_PFX "Error writing new size\n");
338                 goto abort;
339         }
340         /*
341          * Write the current state; we will use this to synchronize
342          * the front-end. If the current state is "connected" the
343          * front-end will get the new size information online.
344          */
345         err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
346         if (err) {
347                 pr_warn(DRV_PFX "Error writing the state\n");
348                 goto abort;
349         }
350
351         err = xenbus_transaction_end(xbt, 0);
352         if (err == -EAGAIN)
353                 goto again;
354         if (err)
355                 pr_warn(DRV_PFX "Error ending transaction\n");
356         return;
357 abort:
358         xenbus_transaction_end(xbt, 1);
359 }
360
361 /*
362  * Notification from the guest OS.
363  */
364 static void blkif_notify_work(struct xen_blkif *blkif)
365 {
366         blkif->waiting_reqs = 1;
367         wake_up(&blkif->wq);
368 }
369
370 irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
371 {
372         blkif_notify_work(dev_id);
373         return IRQ_HANDLED;
374 }
375
376 /*
377  * SCHEDULER FUNCTIONS
378  */
379
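/* Log the per-blkif request counters and reset them for the next interval. */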
380 static void print_stats(struct xen_blkif *blkif)
381 {
382         pr_info("xen-blkback (%s): oo %3d  |  rd %4d  |  wr %4d  |  f %4d"
383                  "  |  ds %4d\n",
384                  current->comm, blkif->st_oo_req,
385                  blkif->st_rd_req, blkif->st_wr_req,
386                  blkif->st_f_req, blkif->st_ds_req);
387         blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
388         blkif->st_rd_req = 0;
389         blkif->st_wr_req = 0;
390         blkif->st_oo_req = 0;
391         blkif->st_ds_req = 0;
392 }
393
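/*
 * Main loop of the per-VBD kernel thread: wait for work from the front-end,
 * process the ring, and release the persistent grants when the thread exits.
 */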
394 int xen_blkif_schedule(void *arg)
395 {
396         struct xen_blkif *blkif = arg;
397         struct xen_vbd *vbd = &blkif->vbd;
398
399         xen_blkif_get(blkif);
400
401         while (!kthread_should_stop()) {
402                 if (try_to_freeze())
403                         continue;
404                 if (unlikely(vbd->size != vbd_sz(vbd)))
405                         xen_vbd_resize(blkif);
406
407                 wait_event_interruptible(
408                         blkif->wq,
409                         blkif->waiting_reqs || kthread_should_stop());
410                 wait_event_interruptible(
411                         blkbk->pending_free_wq,
412                         !list_empty(&blkbk->pending_free) ||
413                         kthread_should_stop());
414
415                 blkif->waiting_reqs = 0;
416                 smp_mb(); /* clear flag *before* checking for work */
417
418                 if (do_block_io_op(blkif))
419                         blkif->waiting_reqs = 1;
420
421                 if (log_stats && time_after(jiffies, blkif->st_print))
422                         print_stats(blkif);
423         }
424
425         /* Free all persistent grant pages */
426         if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
427                 free_persistent_gnts(&blkif->persistent_gnts,
428                         blkif->persistent_gnt_c);
429
430         BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
431         blkif->persistent_gnt_c = 0;
432
433         if (log_stats)
434                 print_stats(blkif);
435
436         blkif->xenblkd = NULL;
437         xen_blkif_put(blkif);
438
439         return 0;
440 }
441
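/* One segment of a request: device bus address (with sector offset) and size in sectors. */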
442 struct seg_buf {
443         unsigned long buf;
444         unsigned int nsec;
445 };
446 /*
447  * Unmap the grant references, and also remove the M2P overrides
448  * used in the 'pending_req'.
449  */
450 static void xen_blkbk_unmap(struct pending_req *req)
451 {
452         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
453         struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
454         unsigned int i, invcount = 0;
455         grant_handle_t handle;
456         int ret;
457
458         for (i = 0; i < req->nr_pages; i++) {
459                 if (!test_bit(i, req->unmap_seg))
460                         continue;
461                 handle = pending_handle(req, i);
462                 if (handle == BLKBACK_INVALID_HANDLE)
463                         continue;
464                 gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
465                                     GNTMAP_host_map, handle);
466                 pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
467                 pages[invcount] = virt_to_page(vaddr(req, i));
468                 invcount++;
469         }
470
471         ret = gnttab_unmap_refs(unmap, NULL, pages, invcount);
472         BUG_ON(ret);
473 }
474
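/*
 * Map the grant references of a request into this domain, reusing grants
 * that are already persistently mapped where possible, and note in
 * 'unmap_seg' which segments must be unmapped again on completion.
 */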
475 static int xen_blkbk_map(struct blkif_request *req,
476                          struct pending_req *pending_req,
477                          struct seg_buf seg[],
478                          struct page *pages[])
479 {
480         struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
481         struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST];
482         struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
483         struct persistent_gnt *persistent_gnt = NULL;
484         struct xen_blkif *blkif = pending_req->blkif;
485         phys_addr_t addr = 0;
486         int i, j;
487         bool new_map;
488         int nseg = req->u.rw.nr_segments;
489         int segs_to_map = 0;
490         int ret = 0;
491         int use_persistent_gnts;
492
493         use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
494
495         BUG_ON(blkif->persistent_gnt_c >
496                    max_mapped_grant_pages(pending_req->blkif->blk_protocol));
497
498         /*
499          * Fill out preq.nr_sects with the proper number of sectors, and set
500          * up map[..] with the PFN of the page in our domain with the
501          * corresponding grant reference for each page.
502          */
503         for (i = 0; i < nseg; i++) {
504                 uint32_t flags;
505
506                 if (use_persistent_gnts)
507                         persistent_gnt = get_persistent_gnt(
508                                 &blkif->persistent_gnts,
509                                 req->u.rw.seg[i].gref);
510
511                 if (persistent_gnt) {
512                         /*
513                          * We are using persistent grants and
514                          * the grant is already mapped
515                          */
516                         new_map = false;
517                 } else if (use_persistent_gnts &&
518                            blkif->persistent_gnt_c <
519                            max_mapped_grant_pages(blkif->blk_protocol)) {
520                         /*
521                          * We are using persistent grants, the grant is
522                          * not mapped but we have room for it
523                          */
524                         new_map = true;
525                         persistent_gnt = kmalloc(
526                                 sizeof(struct persistent_gnt),
527                                 GFP_KERNEL);
528                         if (!persistent_gnt)
529                                 return -ENOMEM;
530                         persistent_gnt->page = alloc_page(GFP_KERNEL);
531                         if (!persistent_gnt->page) {
532                                 kfree(persistent_gnt);
533                                 return -ENOMEM;
534                         }
535                         persistent_gnt->gnt = req->u.rw.seg[i].gref;
536                         persistent_gnt->handle = BLKBACK_INVALID_HANDLE;
537
538                         pages_to_gnt[segs_to_map] =
539                                 persistent_gnt->page;
540                         addr = (unsigned long) pfn_to_kaddr(
541                                 page_to_pfn(persistent_gnt->page));
542
543                         add_persistent_gnt(&blkif->persistent_gnts,
544                                 persistent_gnt);
545                         blkif->persistent_gnt_c++;
546                         pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
547                                  persistent_gnt->gnt, blkif->persistent_gnt_c,
548                                  max_mapped_grant_pages(blkif->blk_protocol));
549                 } else {
550                         /*
551                          * We are either using persistent grants and
552                          * hit the maximum limit of grants mapped,
553                          * or we are not using persistent grants.
554                          */
555                         if (use_persistent_gnts &&
556                                 !blkif->vbd.overflow_max_grants) {
557                                 blkif->vbd.overflow_max_grants = 1;
558                                 pr_alert(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
559                                          blkif->domid, blkif->vbd.handle);
560                         }
561                         new_map = true;
562                         pages[i] = blkbk->pending_page(pending_req, i);
563                         addr = vaddr(pending_req, i);
564                         pages_to_gnt[segs_to_map] =
565                                 blkbk->pending_page(pending_req, i);
566                 }
567
568                 if (persistent_gnt) {
569                         pages[i] = persistent_gnt->page;
570                         persistent_gnts[i] = persistent_gnt;
571                 } else {
572                         persistent_gnts[i] = NULL;
573                 }
574
575                 if (new_map) {
576                         flags = GNTMAP_host_map;
577                         if (!persistent_gnt &&
578                             (pending_req->operation != BLKIF_OP_READ))
579                                 flags |= GNTMAP_readonly;
580                         gnttab_set_map_op(&map[segs_to_map++], addr,
581                                           flags, req->u.rw.seg[i].gref,
582                                           blkif->domid);
583                 }
584         }
585
586         if (segs_to_map) {
587                 ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
588                 BUG_ON(ret);
589         }
590
591         /*
592          * Now swizzle the MFN in our domain with the MFN from the other domain
593          * so that when we access vaddr(pending_req,i) it has the contents of
594          * the page from the other domain.
595          */
596         bitmap_zero(pending_req->unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
597         for (i = 0, j = 0; i < nseg; i++) {
598                 if (!persistent_gnts[i] ||
599                     persistent_gnts[i]->handle == BLKBACK_INVALID_HANDLE) {
600                         /* This is a newly mapped grant */
601                         BUG_ON(j >= segs_to_map);
602                         if (unlikely(map[j].status != 0)) {
603                                 pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
604                                 map[j].handle = BLKBACK_INVALID_HANDLE;
605                                 ret |= 1;
606                                 if (persistent_gnts[i]) {
607                                         rb_erase(&persistent_gnts[i]->node,
608                                                  &blkif->persistent_gnts);
609                                         blkif->persistent_gnt_c--;
610                                         kfree(persistent_gnts[i]);
611                                         persistent_gnts[i] = NULL;
612                                 }
613                         }
614                 }
615                 if (persistent_gnts[i]) {
616                         if (persistent_gnts[i]->handle ==
617                             BLKBACK_INVALID_HANDLE) {
618                                 /*
619                                  * If this is a new persistent grant
620                                  * save the handle
621                                  */
622                                 persistent_gnts[i]->handle = map[j].handle;
623                                 persistent_gnts[i]->dev_bus_addr =
624                                         map[j++].dev_bus_addr;
625                         }
626                         pending_handle(pending_req, i) =
627                                 persistent_gnts[i]->handle;
628
629                         if (ret)
630                                 continue;
631
632                         seg[i].buf = persistent_gnts[i]->dev_bus_addr |
633                                 (req->u.rw.seg[i].first_sect << 9);
634                 } else {
635                         pending_handle(pending_req, i) = map[j].handle;
636                         bitmap_set(pending_req->unmap_seg, i, 1);
637
638                         if (ret) {
639                                 j++;
640                                 continue;
641                         }
642
643                         seg[i].buf = map[j++].dev_bus_addr |
644                                 (req->u.rw.seg[i].first_sect << 9);
645                 }
646         }
647         return ret;
648 }
649
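/* Handle a BLKIF_OP_DISCARD request by issuing a (possibly secure) discard. */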
650 static int dispatch_discard_io(struct xen_blkif *blkif,
651                                 struct blkif_request *req)
652 {
653         int err = 0;
654         int status = BLKIF_RSP_OKAY;
655         struct block_device *bdev = blkif->vbd.bdev;
656         unsigned long secure;
657
658         blkif->st_ds_req++;
659
660         xen_blkif_get(blkif);
661         secure = (blkif->vbd.discard_secure &&
662                  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
663                  BLKDEV_DISCARD_SECURE : 0;
664
665         err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
666                                    req->u.discard.nr_sectors,
667                                    GFP_KERNEL, secure);
668
669         if (err == -EOPNOTSUPP) {
670                 pr_debug(DRV_PFX "discard op failed, not supported\n");
671                 status = BLKIF_RSP_EOPNOTSUPP;
672         } else if (err)
673                 status = BLKIF_RSP_ERROR;
674
675         make_response(blkif, req->u.discard.id, req->operation, status);
676         xen_blkif_put(blkif);
677         return err;
678 }
679
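/* Wait for all requests in flight on this blkif to complete. */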
680 static void xen_blk_drain_io(struct xen_blkif *blkif)
681 {
682         atomic_set(&blkif->drain, 1);
683         do {
684                 /* The refcnt starts at one, plus one reference taken at the
685                  * start of the xen_blkif_schedule thread; <= 2 means idle. */
686                 if (atomic_read(&blkif->refcnt) <= 2)
687                         break;
688                 wait_for_completion_interruptible_timeout(
689                                 &blkif->drain_complete, HZ);
690
691                 if (!atomic_read(&blkif->drain))
692                         break;
693         } while (!kthread_should_stop());
694         atomic_set(&blkif->drain, 0);
695 }
696
697 /*
698  * Completion handling for the bios. Invoked via the bio's bi_end_io() callback.
699  */
700
701 static void __end_block_io_op(struct pending_req *pending_req, int error)
702 {
703         /* An error fails the entire request. */
704         if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
705             (error == -EOPNOTSUPP)) {
706                 pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
707                 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
708                 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
709         } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
710                     (error == -EOPNOTSUPP)) {
711                 pr_debug(DRV_PFX "write barrier op failed, not supported\n");
712                 xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
713                 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
714         } else if (error) {
715                 pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
716                          " error=%d\n", error);
717                 pending_req->status = BLKIF_RSP_ERROR;
718         }
719
720         /*
721          * If all of the bios have completed it is time to unmap
722          * the grant references associated with 'request' and provide
723          * the proper response on the ring.
724          */
725         if (atomic_dec_and_test(&pending_req->pendcnt)) {
726                 xen_blkbk_unmap(pending_req);
727                 make_response(pending_req->blkif, pending_req->id,
728                               pending_req->operation, pending_req->status);
729                 xen_blkif_put(pending_req->blkif);
730                 if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
731                         if (atomic_read(&pending_req->blkif->drain))
732                                 complete(&pending_req->blkif->drain_complete);
733                 }
734                 free_req(pending_req);
735         }
736 }
737
738 /*
739  * bio callback.
740  */
741 static void end_block_io_op(struct bio *bio, int error)
742 {
743         __end_block_io_op(bio->bi_private, error);
744         bio_put(bio);
745 }
746
747
748
749 /*
750  * Function to copy a 'struct blkif_request' from the ring buffer
751  * (which has the sectors we want, the number of them, grant references, etc)
752  * and transmute it to the block API to hand it over to the proper block disk.
753  */
754 static int
755 __do_block_io_op(struct xen_blkif *blkif)
756 {
757         union blkif_back_rings *blk_rings = &blkif->blk_rings;
758         struct blkif_request req;
759         struct pending_req *pending_req;
760         RING_IDX rc, rp;
761         int more_to_do = 0;
762
763         rc = blk_rings->common.req_cons;
764         rp = blk_rings->common.sring->req_prod;
765         rmb(); /* Ensure we see queued requests up to 'rp'. */
766
767         while (rc != rp) {
768
769                 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
770                         break;
771
772                 if (kthread_should_stop()) {
773                         more_to_do = 1;
774                         break;
775                 }
776
777                 pending_req = alloc_req();
778                 if (NULL == pending_req) {
779                         blkif->st_oo_req++;
780                         more_to_do = 1;
781                         break;
782                 }
783
784                 switch (blkif->blk_protocol) {
785                 case BLKIF_PROTOCOL_NATIVE:
786                         memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
787                         break;
788                 case BLKIF_PROTOCOL_X86_32:
789                         blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
790                         break;
791                 case BLKIF_PROTOCOL_X86_64:
792                         blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
793                         break;
794                 default:
795                         BUG();
796                 }
797                 blk_rings->common.req_cons = ++rc; /* before make_response() */
798
799                 /* Apply all sanity checks to /private copy/ of request. */
800                 barrier();
801                 if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
802                         free_req(pending_req);
803                         if (dispatch_discard_io(blkif, &req))
804                                 break;
805                 } else if (dispatch_rw_block_io(blkif, &req, pending_req))
806                         break;
807
808                 /* Yield point for this unbounded loop. */
809                 cond_resched();
810         }
811
812         return more_to_do;
813 }
814
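/*
 * Drain the ring of requests, doing a final check for requests that may
 * have been posted while we were about to go idle.
 */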
815 static int
816 do_block_io_op(struct xen_blkif *blkif)
817 {
818         union blkif_back_rings *blk_rings = &blkif->blk_rings;
819         int more_to_do;
820
821         do {
822                 more_to_do = __do_block_io_op(blkif);
823                 if (more_to_do)
824                         break;
825
826                 RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
827         } while (more_to_do);
828
829         return more_to_do;
830 }
831 /*
832  * Transmute the 'struct blkif_request' into a proper 'struct bio'
833  * and call 'submit_bio' to pass it to the underlying storage.
834  */
835 static int dispatch_rw_block_io(struct xen_blkif *blkif,
836                                 struct blkif_request *req,
837                                 struct pending_req *pending_req)
838 {
839         struct phys_req preq;
840         struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
841         unsigned int nseg;
842         struct bio *bio = NULL;
843         struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
844         int i, nbio = 0;
845         int operation;
846         struct blk_plug plug;
847         bool drain = false;
848         struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
849
850         switch (req->operation) {
851         case BLKIF_OP_READ:
852                 blkif->st_rd_req++;
853                 operation = READ;
854                 break;
855         case BLKIF_OP_WRITE:
856                 blkif->st_wr_req++;
857                 operation = WRITE_ODIRECT;
858                 break;
859         case BLKIF_OP_WRITE_BARRIER:
860                 drain = true;
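                /* fall through: a barrier is a drain plus a flush */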
861         case BLKIF_OP_FLUSH_DISKCACHE:
862                 blkif->st_f_req++;
863                 operation = WRITE_FLUSH;
864                 break;
865         default:
866                 operation = 0; /* make gcc happy */
867                 goto fail_response;
868                 break;
869         }
870
871         /* Check that the number of segments is sane. */
872         nseg = req->u.rw.nr_segments;
873
874         if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
875             unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
876                 pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
877                          nseg);
878                 /* Haven't submitted any bios yet. */
879                 goto fail_response;
880         }
881
882         preq.dev           = req->u.rw.handle;
883         preq.sector_number = req->u.rw.sector_number;
884         preq.nr_sects      = 0;
885
886         pending_req->blkif     = blkif;
887         pending_req->id        = req->u.rw.id;
888         pending_req->operation = req->operation;
889         pending_req->status    = BLKIF_RSP_OKAY;
890         pending_req->nr_pages  = nseg;
891
892         for (i = 0; i < nseg; i++) {
893                 seg[i].nsec = req->u.rw.seg[i].last_sect -
894                         req->u.rw.seg[i].first_sect + 1;
895                 if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
896                     (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
897                         goto fail_response;
898                 preq.nr_sects += seg[i].nsec;
899
900         }
901
902         if (xen_vbd_translate(&preq, blkif, operation) != 0) {
903                 pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
904                          operation == READ ? "read" : "write",
905                          preq.sector_number,
906                          preq.sector_number + preq.nr_sects, preq.dev);
907                 goto fail_response;
908         }
909
910         /*
911          * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
912          * is set there.
913          */
914         for (i = 0; i < nseg; i++) {
915                 if (((int)preq.sector_number|(int)seg[i].nsec) &
916                     ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
917                         pr_debug(DRV_PFX "Misaligned I/O request from domain %d\n",
918                                  blkif->domid);
919                         goto fail_response;
920                 }
921         }
922
923         /* Wait on all outstanding I/Os and once that has been completed
924          * issue the WRITE_FLUSH.
925          */
926         if (drain)
927                 xen_blk_drain_io(pending_req->blkif);
928
929         /*
930          * If we have failed at this point, we need to undo the M2P override,
931          * set gnttab_set_unmap_op on all of the grant references and perform
932          * the hypercall to unmap the grants - that is all done in
933          * xen_blkbk_unmap.
934          */
935         if (xen_blkbk_map(req, pending_req, seg, pages))
936                 goto fail_flush;
937
938         /*
939          * The corresponding xen_blkif_put is done in __end_block_io_op, which
940          * is also reached via the fail_put_bio error path below.
941          */
942         xen_blkif_get(blkif);
943
944         for (i = 0; i < nseg; i++) {
945                 while ((bio == NULL) ||
946                        (bio_add_page(bio,
947                                      pages[i],
948                                      seg[i].nsec << 9,
949                                      seg[i].buf & ~PAGE_MASK) == 0)) {
950
951                         bio = bio_alloc(GFP_KERNEL, nseg-i);
952                         if (unlikely(bio == NULL))
953                                 goto fail_put_bio;
954
955                         biolist[nbio++] = bio;
956                         bio->bi_bdev    = preq.bdev;
957                         bio->bi_private = pending_req;
958                         bio->bi_end_io  = end_block_io_op;
959                         bio->bi_sector  = preq.sector_number;
960                 }
961
962                 preq.sector_number += seg[i].nsec;
963         }
964
965         /* This will be hit if the operation was a flush. */
966         if (!bio) {
967                 BUG_ON(operation != WRITE_FLUSH);
968
969                 bio = bio_alloc(GFP_KERNEL, 0);
970                 if (unlikely(bio == NULL))
971                         goto fail_put_bio;
972
973                 biolist[nbio++] = bio;
974                 bio->bi_bdev    = preq.bdev;
975                 bio->bi_private = pending_req;
976                 bio->bi_end_io  = end_block_io_op;
977         }
978
979         /*
980          * Set the pending count to the number of bios up front, so that the
981          * submission loop below does not have to call atomic_inc per bio.
982          */
983         atomic_set(&pending_req->pendcnt, nbio);
984
985         /* Plug the queue so the bios can be batched, then start sending I/O. */
986         blk_start_plug(&plug);
987
988         for (i = 0; i < nbio; i++)
989                 submit_bio(operation, biolist[i]);
990
991         /* Let the I/Os go.. */
992         blk_finish_plug(&plug);
993
994         if (operation == READ)
995                 blkif->st_rd_sect += preq.nr_sects;
996         else if (operation & WRITE)
997                 blkif->st_wr_sect += preq.nr_sects;
998
999         return 0;
1000
1001  fail_flush:
1002         xen_blkbk_unmap(pending_req);
1003  fail_response:
1004         /* Haven't submitted any bios yet. */
1005         make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);
1006         free_req(pending_req);
1007         msleep(1); /* back off a bit */
1008         return -EIO;
1009
1010  fail_put_bio:
1011         for (i = 0; i < nbio; i++)
1012                 bio_put(biolist[i]);
1013         __end_block_io_op(pending_req, -EINVAL);
1014         msleep(1); /* back off a bit */
1015         return -EIO;
1016 }
1017
1018
1019
1020 /*
1021  * Put a response on the ring on how the operation fared.
1022  */
1023 static void make_response(struct xen_blkif *blkif, u64 id,
1024                           unsigned short op, int st)
1025 {
1026         struct blkif_response  resp;
1027         unsigned long     flags;
1028         union blkif_back_rings *blk_rings = &blkif->blk_rings;
1029         int notify;
1030
1031         resp.id        = id;
1032         resp.operation = op;
1033         resp.status    = st;
1034
1035         spin_lock_irqsave(&blkif->blk_ring_lock, flags);
1036         /* Place on the response ring for the relevant domain. */
1037         switch (blkif->blk_protocol) {
1038         case BLKIF_PROTOCOL_NATIVE:
1039                 memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
1040                        &resp, sizeof(resp));
1041                 break;
1042         case BLKIF_PROTOCOL_X86_32:
1043                 memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
1044                        &resp, sizeof(resp));
1045                 break;
1046         case BLKIF_PROTOCOL_X86_64:
1047                 memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
1048                        &resp, sizeof(resp));
1049                 break;
1050         default:
1051                 BUG();
1052         }
1053         blk_rings->common.rsp_prod_pvt++;
1054         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1055         spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
1056         if (notify)
1057                 notify_remote_via_irq(blkif->irq);
1058 }
1059
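/*
 * Module initialisation: allocate the pool of pending requests, grant
 * handles and pages, then register the interface and the xenbus backend.
 */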
1060 static int __init xen_blkif_init(void)
1061 {
1062         int i, mmap_pages;
1063         int rc = 0;
1064
1065         if (!xen_domain())
1066                 return -ENODEV;
1067
1068         blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
1069         if (!blkbk) {
1070                 pr_alert(DRV_PFX "%s: out of memory!\n", __func__);
1071                 return -ENOMEM;
1072         }
1073
1074         mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
1075
1076         blkbk->pending_reqs          = kzalloc(sizeof(blkbk->pending_reqs[0]) *
1077                                         xen_blkif_reqs, GFP_KERNEL);
1078         blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
1079                                         mmap_pages, GFP_KERNEL);
1080         blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
1081                                         mmap_pages, GFP_KERNEL);
1082
1083         if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
1084             !blkbk->pending_pages) {
1085                 rc = -ENOMEM;
1086                 goto out_of_memory;
1087         }
1088
1089         for (i = 0; i < mmap_pages; i++) {
1090                 blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
1091                 blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
1092                 if (blkbk->pending_pages[i] == NULL) {
1093                         rc = -ENOMEM;
1094                         goto out_of_memory;
1095                 }
1096         }
1097         rc = xen_blkif_interface_init();
1098         if (rc)
1099                 goto failed_init;
1100
1101         INIT_LIST_HEAD(&blkbk->pending_free);
1102         spin_lock_init(&blkbk->pending_free_lock);
1103         init_waitqueue_head(&blkbk->pending_free_wq);
1104
1105         for (i = 0; i < xen_blkif_reqs; i++)
1106                 list_add_tail(&blkbk->pending_reqs[i].free_list,
1107                               &blkbk->pending_free);
1108
1109         rc = xen_blkif_xenbus_init();
1110         if (rc)
1111                 goto failed_init;
1112
1113         return 0;
1114
1115  out_of_memory:
1116         pr_alert(DRV_PFX "%s: out of memory\n", __func__);
1117  failed_init:
1118         kfree(blkbk->pending_reqs);
1119         kfree(blkbk->pending_grant_handles);
1120         if (blkbk->pending_pages) {
1121                 for (i = 0; i < mmap_pages; i++) {
1122                         if (blkbk->pending_pages[i])
1123                                 __free_page(blkbk->pending_pages[i]);
1124                 }
1125                 kfree(blkbk->pending_pages);
1126         }
1127         kfree(blkbk);
1128         blkbk = NULL;
1129         return rc;
1130 }
1131
1132 module_init(xen_blkif_init);
1133
1134 MODULE_LICENSE("Dual BSD/GPL");
1135 MODULE_ALIAS("xen-backend:vbd");