/*
 * block/blk-lib.c - functions related to generic block device helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

/*
 * Allocate a bio with room for @nr_pages vecs.  If @bio is non-NULL,
 * chain it to the new bio and submit it: the new bio then cannot
 * complete before @bio does, so waiting on the tail of the chain waits
 * for every bio submitted before it.
 */
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
                gfp_t gfp)
{
        struct bio *new = bio_alloc(gfp, nr_pages);

        if (bio) {
                bio_chain(bio, new);
                submit_bio(bio);
        }

        return new;
}
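
/*
 * Illustrative sketch (not part of the original file): the chaining
 * pattern the helpers below build on.  A single submit_bio_wait() on the
 * last bio returned waits for the whole chain:
 *
 *      struct bio *bio = NULL;
 *      int err;
 *
 *      bio = next_bio(bio, 1, GFP_KERNEL);     (first call: allocate only)
 *      ... set up bio ...
 *      bio = next_bio(bio, 1, GFP_KERNEL);     (chains and submits previous)
 *      ... set up bio ...
 *      err = submit_bio_wait(bio);
 *      bio_put(bio);
 */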

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, int flags,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        unsigned int granularity;
        enum req_op op;
        int alignment;

        if (!q)
                return -ENXIO;

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (flags & BLKDEV_DISCARD_ZERO)
                        return -EOPNOTSUPP;
                if (!blk_queue_secure_erase(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_SECURE_ERASE;
        } else {
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
                if ((flags & BLKDEV_DISCARD_ZERO) &&
                    !q->limits.discard_zeroes_data)
                        return -EOPNOTSUPP;
                op = REQ_OP_DISCARD;
        }

        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

        while (nr_sects) {
                unsigned int req_sects;
                sector_t end_sect, tmp;

                /* Make sure bi_size doesn't overflow */
                req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

                /*
                 * If splitting a request, and the next starting sector would be
                 * misaligned, stop the discard at the previous aligned sector.
                 */
                end_sect = sector + req_sects;
                tmp = end_sect;
                if (req_sects < nr_sects &&
                    sector_div(tmp, granularity) != alignment) {
                        end_sect = end_sect - alignment;
                        sector_div(end_sect, granularity);
                        end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }
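
                /*
                 * Worked example with illustrative numbers (not taken from
                 * the code above): granularity = 8, alignment = 1,
                 * sector = 9, req_sects = 13.  Then end_sect = 22 and
                 * 22 % 8 == 6 != 1, so end_sect is pulled back to
                 * ((22 - 1) / 8) * 8 + 1 = 17 and req_sects becomes 8.
                 * The next chunk starts at sector 17, and 17 % 8 == 1
                 * matches the device's discard alignment again.
                 */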

                bio = next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio_set_op_attrs(bio, op, 0);

                bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;

                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();
        }

        *biop = bio;
        return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
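
/*
 * Example (illustrative only, not part of the original file): because
 * __blkdev_issue_discard() only builds and chains bios, a caller can batch
 * several discontiguous ranges into one chain and wait once.  The ranges
 * (s1/n1, s2/n2) below are hypothetical:
 *
 *      struct blk_plug plug;
 *      struct bio *bio = NULL;
 *      int err;
 *
 *      blk_start_plug(&plug);
 *      err = __blkdev_issue_discard(bdev, s1, n1, GFP_KERNEL, 0, &bio);
 *      if (!err)
 *              err = __blkdev_issue_discard(bdev, s2, n2, GFP_KERNEL, 0, &bio);
 *      if (!err && bio) {
 *              err = submit_bio_wait(bio);
 *              bio_put(bio);
 *      }
 *      blk_finish_plug(&plug);
 */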

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:       blockdev to issue discard for
 * @sector:     start sector
 * @nr_sects:   number of sectors to discard
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 * @flags:      BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
                        &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                if (ret == -EOPNOTSUPP && !(flags & BLKDEV_DISCARD_ZERO))
                        ret = 0;
                bio_put(bio);
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
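
/*
 * Example (illustrative only, not part of the original file): discarding a
 * hypothetical range while tolerating devices without discard support.  A
 * zeroing discard would pass BLKDEV_DISCARD_ZERO instead of 0:
 *
 *      int err = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
 *
 *      if (err && err != -EOPNOTSUPP)
 *              pr_warn("discard failed: %d\n", err);
 */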

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:       target blockdev
 * @sector:     start sector
 * @nr_sects:   number of sectors to write
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 * @page:       page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                            sector_t nr_sects, gfp_t gfp_mask,
                            struct page *page)
{
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio *bio = NULL;
        int ret = 0;

        if (!q)
                return -ENXIO;

        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
        max_write_same_sectors = UINT_MAX >> 9;

        while (nr_sects) {
                bio = next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
        }

        if (bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
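
/*
 * Example (illustrative only, not part of the original file): replicating a
 * single logical block of zeroes across a hypothetical range, as
 * blkdev_issue_zeroout() below does when the device advertises WRITE SAME:
 *
 *      if (bdev_write_same(bdev)) {
 *              int err = blkdev_issue_write_same(bdev, sector, nr_sects,
 *                                                GFP_KERNEL, ZERO_PAGE(0));
 *              ...
 *      }
 */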

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:       blockdev to issue against
 * @sector:     start sector
 * @nr_sects:   number of sectors to write
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                                  sector_t nr_sects, gfp_t gfp_mask)
{
        int ret;
        struct bio *bio = NULL;
        unsigned int sz;

        while (nr_sects != 0) {
                bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
                                gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
                        /* bio_add_page() returns the number of bytes added */
                        ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
                        nr_sects -= ret >> 9;
                        sector += ret >> 9;
                        /* bio full?  chain a fresh one and keep going */
                        if (ret < (sz << 9))
                                break;
                }
        }

        if (bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
                return ret;
        }
        return 0;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:       blockdev to write
 * @sector:     start sector
 * @nr_sects:   number of sectors to write
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 * @discard:    whether to discard the block range
 *
 * Description:
 *  Zero-fill a block range.  If the discard flag is set and the block
 *  device guarantees that subsequent READ operations to the block range
 *  in question will return zeroes, the blocks will be discarded.  If the
 *  discard request fails, if the discard flag is not set, or if
 *  discard_zeroes_data is not supported, this function falls back to
 *  zeroing the blocks manually, thus provisioning (allocating,
 *  anchoring) them.  If the block device supports the WRITE SAME command
 *  blkdev_issue_zeroout() will use it to optimize the process of
 *  clearing the block range.  Otherwise the zeroing will be performed
 *  using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
        if (discard) {
                if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
                                BLKDEV_DISCARD_ZERO))
                        return 0;
        }

        if (bdev_write_same(bdev) &&
            blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
                                    ZERO_PAGE(0)) == 0)
                return 0;

        return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
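
/*
 * Example (illustrative only, not part of the original file): zeroing a
 * hypothetical range, preferring a zeroing discard when the device
 * guarantees that discarded blocks read back as zeroes:
 *
 *      int err = blkdev_issue_zeroout(bdev, sector, nr_sects,
 *                                     GFP_KERNEL, true);
 */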