fs/btrfs/scrub.c [cascardo/linux.git]
1 /*
2  * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
21 #include "ctree.h"
22 #include "volumes.h"
23 #include "disk-io.h"
24 #include "ordered-data.h"
25 #include "transaction.h"
26 #include "backref.h"
27 #include "extent_io.h"
28 #include "dev-replace.h"
29 #include "check-integrity.h"
30 #include "rcu-string.h"
31 #include "raid56.h"
32
33 /*
34  * This is only the first step towards a full-featured scrub. It reads all
35  * extents and super blocks and verifies their checksums. In case a bad
36  * checksum is found or an extent cannot be read, good data will be written
37  * back if any can be found.
38  *
39  * Future enhancements:
40  *  - In case an unrepairable extent is encountered, track which files are
41  *    affected and report them
42  *  - track and record media errors, throw out bad devices
43  *  - add a mode to also read unallocated space
44  */
45
46 struct scrub_block;
47 struct scrub_ctx;
48
49 /*
50  * The following three values only influence performance.
51  * The last one configures the number of parallel and outstanding I/O
52  * operations. The first two values configure an upper limit for the number
53  * of (dynamically allocated) pages that are added to a bio.
54  */
55 #define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
56 #define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
57 #define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */
58
59 /*
60  * the following value times PAGE_SIZE needs to be large enough to match the
61  * largest node/leaf/sector size that shall be supported.
62  * Values larger than BTRFS_STRIPE_LEN are not supported.
63  */
64 #define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */
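
/*
 * Worked example, assuming the common 4 KiB PAGE_SIZE: a read or write
 * bio then carries at most 32 * 4 KiB = 128 KiB, the pool of 64 bios
 * allows up to 64 * 128 KiB = 8 MiB of scrub I/O in flight per device,
 * and a single scrub_block can span 16 * 4 KiB = 64 KiB, enough for the
 * largest supported node/leaf/sector size.
 */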
65
66 struct scrub_page {
67         struct scrub_block      *sblock;
68         struct page             *page;
69         struct btrfs_device     *dev;
70         u64                     flags;  /* extent flags */
71         u64                     generation;
72         u64                     logical;
73         u64                     physical;
74         u64                     physical_for_dev_replace;
75         atomic_t                ref_count;
76         struct {
77                 unsigned int    mirror_num:8;
78                 unsigned int    have_csum:1;
79                 unsigned int    io_error:1;
80         };
81         u8                      csum[BTRFS_CSUM_SIZE];
82 };
83
84 struct scrub_bio {
85         int                     index;
86         struct scrub_ctx        *sctx;
87         struct btrfs_device     *dev;
88         struct bio              *bio;
89         int                     err;
90         u64                     logical;
91         u64                     physical;
92 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
93         struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
94 #else
95         struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
96 #endif
97         int                     page_count;
98         int                     next_free;
99         struct btrfs_work       work;
100 };
101
102 struct scrub_block {
103         struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
104         int                     page_count;
105         atomic_t                outstanding_pages;
106         atomic_t                ref_count; /* free mem on transition to zero */
107         struct scrub_ctx        *sctx;
108         struct {
109                 unsigned int    header_error:1;
110                 unsigned int    checksum_error:1;
111                 unsigned int    no_io_error_seen:1;
112                 unsigned int    generation_error:1; /* also sets header_error */
113         };
114 };
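
/*
 * Taken together with the limits above: a scrub_block groups the
 * scrub_pages of one checksummed unit (a data sector or a tree
 * node/leaf, at most SCRUB_MAX_PAGES_PER_BLOCK pages). Both scrub_page
 * and scrub_block carry a ref_count and are freed on the transition to
 * zero (see scrub_block_put()/scrub_page_put() below).
 */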
115
116 struct scrub_wr_ctx {
117         struct scrub_bio *wr_curr_bio;
118         struct btrfs_device *tgtdev;
119         int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
120         atomic_t flush_all_writes;
121         struct mutex wr_lock;
122 };
123
124 struct scrub_ctx {
125         struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
126         struct btrfs_root       *dev_root;
127         int                     first_free;
128         int                     curr;
129         atomic_t                bios_in_flight;
130         atomic_t                workers_pending;
131         spinlock_t              list_lock;
132         wait_queue_head_t       list_wait;
133         u16                     csum_size;
134         struct list_head        csum_list;
135         atomic_t                cancel_req;
136         int                     readonly;
137         int                     pages_per_rd_bio;
138         u32                     sectorsize;
139         u32                     nodesize;
140
141         int                     is_dev_replace;
142         struct scrub_wr_ctx     wr_ctx;
143
144         /*
145          * statistics
146          */
147         struct btrfs_scrub_progress stat;
148         spinlock_t              stat_lock;
149 };
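
/*
 * A reading of the context structure, based on scrub_setup_ctx() below:
 * one scrub_ctx is set up per scrubbed device and owns the pool of
 * SCRUB_BIOS_PER_SCTX scrub_bios (threaded into a free list via
 * first_free/next_free), the csum_list of data checksums to verify, the
 * counters used for pausing/cancelling, and the per-device statistics.
 */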
150
151 struct scrub_fixup_nodatasum {
152         struct scrub_ctx        *sctx;
153         struct btrfs_device     *dev;
154         u64                     logical;
155         struct btrfs_root       *root;
156         struct btrfs_work       work;
157         int                     mirror_num;
158 };
159
160 struct scrub_nocow_inode {
161         u64                     inum;
162         u64                     offset;
163         u64                     root;
164         struct list_head        list;
165 };
166
167 struct scrub_copy_nocow_ctx {
168         struct scrub_ctx        *sctx;
169         u64                     logical;
170         u64                     len;
171         int                     mirror_num;
172         u64                     physical_for_dev_replace;
173         struct list_head        inodes;
174         struct btrfs_work       work;
175 };
176
177 struct scrub_warning {
178         struct btrfs_path       *path;
179         u64                     extent_item_size;
180         const char              *errstr;
181         sector_t                sector;
182         u64                     logical;
183         struct btrfs_device     *dev;
184 };
185
186 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
187 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
188 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
189 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
190 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
191 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
192                                      struct btrfs_fs_info *fs_info,
193                                      struct scrub_block *original_sblock,
194                                      u64 length, u64 logical,
195                                      struct scrub_block *sblocks_for_recheck);
196 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
197                                 struct scrub_block *sblock, int is_metadata,
198                                 int have_csum, u8 *csum, u64 generation,
199                                 u16 csum_size);
200 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
201                                          struct scrub_block *sblock,
202                                          int is_metadata, int have_csum,
203                                          const u8 *csum, u64 generation,
204                                          u16 csum_size);
205 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
206                                              struct scrub_block *sblock_good,
207                                              int force_write);
208 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
209                                             struct scrub_block *sblock_good,
210                                             int page_num, int force_write);
211 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
212 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
213                                            int page_num);
214 static int scrub_checksum_data(struct scrub_block *sblock);
215 static int scrub_checksum_tree_block(struct scrub_block *sblock);
216 static int scrub_checksum_super(struct scrub_block *sblock);
217 static void scrub_block_get(struct scrub_block *sblock);
218 static void scrub_block_put(struct scrub_block *sblock);
219 static void scrub_page_get(struct scrub_page *spage);
220 static void scrub_page_put(struct scrub_page *spage);
221 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
222                                     struct scrub_page *spage);
223 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
224                        u64 physical, struct btrfs_device *dev, u64 flags,
225                        u64 gen, int mirror_num, u8 *csum, int force,
226                        u64 physical_for_dev_replace);
227 static void scrub_bio_end_io(struct bio *bio, int err);
228 static void scrub_bio_end_io_worker(struct btrfs_work *work);
229 static void scrub_block_complete(struct scrub_block *sblock);
230 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
231                                u64 extent_logical, u64 extent_len,
232                                u64 *extent_physical,
233                                struct btrfs_device **extent_dev,
234                                int *extent_mirror_num);
235 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
236                               struct scrub_wr_ctx *wr_ctx,
237                               struct btrfs_fs_info *fs_info,
238                               struct btrfs_device *dev,
239                               int is_dev_replace);
240 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
241 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
242                                     struct scrub_page *spage);
243 static void scrub_wr_submit(struct scrub_ctx *sctx);
244 static void scrub_wr_bio_end_io(struct bio *bio, int err);
245 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
246 static int write_page_nocow(struct scrub_ctx *sctx,
247                             u64 physical_for_dev_replace, struct page *page);
248 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
249                                       struct scrub_copy_nocow_ctx *ctx);
250 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
251                             int mirror_num, u64 physical_for_dev_replace);
252 static void copy_nocow_pages_worker(struct btrfs_work *work);
253 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
254 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
255
256
257 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
258 {
259         atomic_inc(&sctx->bios_in_flight);
260 }
261
262 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
263 {
264         atomic_dec(&sctx->bios_in_flight);
265         wake_up(&sctx->list_wait);
266 }
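
/*
 * Intended pairing of the two helpers above, as a sketch inferred from
 * their names rather than a quote of the actual call sites:
 *
 *	scrub_pending_bio_inc(sctx);
 *	btrfsic_submit_bio(READ, sbio->bio);
 *	...
 *	scrub_pending_bio_dec(sctx);	(later, e.g. from scrub_bio_end_io_worker())
 *
 * The wake_up() on list_wait lets waiters wait for all in-flight bios
 * to complete.
 */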
267
268 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
269 {
270         while (atomic_read(&fs_info->scrub_pause_req)) {
271                 mutex_unlock(&fs_info->scrub_lock);
272                 wait_event(fs_info->scrub_pause_wait,
273                    atomic_read(&fs_info->scrub_pause_req) == 0);
274                 mutex_lock(&fs_info->scrub_lock);
275         }
276 }
277
278 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
279 {
280         atomic_inc(&fs_info->scrubs_paused);
281         wake_up(&fs_info->scrub_pause_wait);
282
283         mutex_lock(&fs_info->scrub_lock);
284         __scrub_blocked_if_needed(fs_info);
285         atomic_dec(&fs_info->scrubs_paused);
286         mutex_unlock(&fs_info->scrub_lock);
287
288         wake_up(&fs_info->scrub_pause_wait);
289 }
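
/*
 * Pause handshake, as far as it can be seen from the two helpers above:
 * another context (presumably btrfs_scrub_pause(), not shown here)
 * raises fs_info->scrub_pause_req; scrub_blocked_if_needed() bumps
 * scrubs_paused so that the requester can tell this scrub has
 * acknowledged the pause, and __scrub_blocked_if_needed() drops
 * scrub_lock while sleeping so the side that clears scrub_pause_req is
 * not blocked on the mutex.
 */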
290
291 /*
292  * used for workers that require transaction commits (i.e., for the
293  * NOCOW case)
294  */
295 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
296 {
297         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
298
299         /*
300          * increment scrubs_running to prevent cancel requests from
301          * completing as long as a worker is running. we must also
302          * increment scrubs_paused to prevent deadlocking on pause
303          * requests used for transaction commits (as the worker uses a
304          * transaction context). it is safe to regard the worker
305          * as paused for all practical purposes. effectively, we only
306          * prevent cancellation requests from completing.
307          */
308         mutex_lock(&fs_info->scrub_lock);
309         atomic_inc(&fs_info->scrubs_running);
310         atomic_inc(&fs_info->scrubs_paused);
311         mutex_unlock(&fs_info->scrub_lock);
312
313         /*
314          * The check of the @scrubs_running == @scrubs_paused
315          * condition inside wait_event() is not an atomic operation,
316          * which means we may inc/dec @scrubs_running/@scrubs_paused
317          * at any time. Wake up @scrub_pause_wait as often as we can
318          * so that a transaction commit is blocked as briefly as possible.
319          */
320         wake_up(&fs_info->scrub_pause_wait);
321
322         atomic_inc(&sctx->workers_pending);
323 }
324
325 /* used for workers that require transaction commits */
326 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
327 {
328         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
329
330         /*
331          * see scrub_pending_trans_workers_inc() why we're pretending
332          * to be paused in the scrub counters
333          */
334         mutex_lock(&fs_info->scrub_lock);
335         atomic_dec(&fs_info->scrubs_running);
336         atomic_dec(&fs_info->scrubs_paused);
337         mutex_unlock(&fs_info->scrub_lock);
338         atomic_dec(&sctx->workers_pending);
339         wake_up(&fs_info->scrub_pause_wait);
340         wake_up(&sctx->list_wait);
341 }
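
/*
 * These two helpers bracket such transaction-committing workers: in this
 * file, for example, scrub_pending_trans_workers_inc() is called right
 * before the nodatasum fixup work is queued, and the matching
 * scrub_pending_trans_workers_dec() runs at the end of
 * scrub_fixup_nodatasum().
 */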
342
343 static void scrub_free_csums(struct scrub_ctx *sctx)
344 {
345         while (!list_empty(&sctx->csum_list)) {
346                 struct btrfs_ordered_sum *sum;
347                 sum = list_first_entry(&sctx->csum_list,
348                                        struct btrfs_ordered_sum, list);
349                 list_del(&sum->list);
350                 kfree(sum);
351         }
352 }
353
354 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
355 {
356         int i;
357
358         if (!sctx)
359                 return;
360
361         scrub_free_wr_ctx(&sctx->wr_ctx);
362
363         /* this can happen when scrub is cancelled */
364         if (sctx->curr != -1) {
365                 struct scrub_bio *sbio = sctx->bios[sctx->curr];
366
367                 for (i = 0; i < sbio->page_count; i++) {
368                         WARN_ON(!sbio->pagev[i]->page);
369                         scrub_block_put(sbio->pagev[i]->sblock);
370                 }
371                 bio_put(sbio->bio);
372         }
373
374         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
375                 struct scrub_bio *sbio = sctx->bios[i];
376
377                 if (!sbio)
378                         break;
379                 kfree(sbio);
380         }
381
382         scrub_free_csums(sctx);
383         kfree(sctx);
384 }
385
386 static noinline_for_stack
387 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
388 {
389         struct scrub_ctx *sctx;
390         int             i;
391         struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
392         int pages_per_rd_bio;
393         int ret;
394
395         /*
396          * the setting of pages_per_rd_bio is correct for scrub but might
397          * be wrong for the dev_replace code where we might read from
398          * different devices in the initial huge bios. However, that
399          * code is able to correctly handle the case when adding a page
400          * to a bio fails.
401          */
402         if (dev->bdev)
403                 pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
404                                          bio_get_nr_vecs(dev->bdev));
405         else
406                 pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
407         sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
408         if (!sctx)
409                 goto nomem;
410         sctx->is_dev_replace = is_dev_replace;
411         sctx->pages_per_rd_bio = pages_per_rd_bio;
412         sctx->curr = -1;
413         sctx->dev_root = dev->dev_root;
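        /*
         * The loop below threads the freshly allocated bios into a simple
         * free list: bios[i]->next_free points at the next index, -1
         * terminates the list, and first_free = 0 marks its head.
         */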
414         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
415                 struct scrub_bio *sbio;
416
417                 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
418                 if (!sbio)
419                         goto nomem;
420                 sctx->bios[i] = sbio;
421
422                 sbio->index = i;
423                 sbio->sctx = sctx;
424                 sbio->page_count = 0;
425                 btrfs_init_work(&sbio->work, btrfs_scrub_helper,
426                                 scrub_bio_end_io_worker, NULL, NULL);
427
428                 if (i != SCRUB_BIOS_PER_SCTX - 1)
429                         sctx->bios[i]->next_free = i + 1;
430                 else
431                         sctx->bios[i]->next_free = -1;
432         }
433         sctx->first_free = 0;
434         sctx->nodesize = dev->dev_root->nodesize;
435         sctx->sectorsize = dev->dev_root->sectorsize;
436         atomic_set(&sctx->bios_in_flight, 0);
437         atomic_set(&sctx->workers_pending, 0);
438         atomic_set(&sctx->cancel_req, 0);
439         sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
440         INIT_LIST_HEAD(&sctx->csum_list);
441
442         spin_lock_init(&sctx->list_lock);
443         spin_lock_init(&sctx->stat_lock);
444         init_waitqueue_head(&sctx->list_wait);
445
446         ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
447                                  fs_info->dev_replace.tgtdev, is_dev_replace);
448         if (ret) {
449                 scrub_free_ctx(sctx);
450                 return ERR_PTR(ret);
451         }
452         return sctx;
453
454 nomem:
455         scrub_free_ctx(sctx);
456         return ERR_PTR(-ENOMEM);
457 }
458
459 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
460                                      void *warn_ctx)
461 {
462         u64 isize;
463         u32 nlink;
464         int ret;
465         int i;
466         struct extent_buffer *eb;
467         struct btrfs_inode_item *inode_item;
468         struct scrub_warning *swarn = warn_ctx;
469         struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
470         struct inode_fs_paths *ipath = NULL;
471         struct btrfs_root *local_root;
472         struct btrfs_key root_key;
473
474         root_key.objectid = root;
475         root_key.type = BTRFS_ROOT_ITEM_KEY;
476         root_key.offset = (u64)-1;
477         local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
478         if (IS_ERR(local_root)) {
479                 ret = PTR_ERR(local_root);
480                 goto err;
481         }
482
483         ret = inode_item_info(inum, 0, local_root, swarn->path);
484         if (ret) {
485                 btrfs_release_path(swarn->path);
486                 goto err;
487         }
488
489         eb = swarn->path->nodes[0];
490         inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
491                                         struct btrfs_inode_item);
492         isize = btrfs_inode_size(eb, inode_item);
493         nlink = btrfs_inode_nlink(eb, inode_item);
494         btrfs_release_path(swarn->path);
495
496         ipath = init_ipath(4096, local_root, swarn->path);
497         if (IS_ERR(ipath)) {
498                 ret = PTR_ERR(ipath);
499                 ipath = NULL;
500                 goto err;
501         }
502         ret = paths_from_inode(inum, ipath);
503
504         if (ret < 0)
505                 goto err;
506
507         /*
508          * we deliberately ignore the fact that ipath might have been too
509          * small to hold all of the paths here
510          */
511         for (i = 0; i < ipath->fspath->elem_cnt; ++i)
512                 printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
513                         "%s, sector %llu, root %llu, inode %llu, offset %llu, "
514                         "length %llu, links %u (path: %s)\n", swarn->errstr,
515                         swarn->logical, rcu_str_deref(swarn->dev->name),
516                         (unsigned long long)swarn->sector, root, inum, offset,
517                         min(isize - offset, (u64)PAGE_SIZE), nlink,
518                         (char *)(unsigned long)ipath->fspath->val[i]);
519
520         free_ipath(ipath);
521         return 0;
522
523 err:
524         printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
525                 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
526                 "resolving failed with ret=%d\n", swarn->errstr,
527                 swarn->logical, rcu_str_deref(swarn->dev->name),
528                 (unsigned long long)swarn->sector, root, inum, offset, ret);
529
530         free_ipath(ipath);
531         return 0;
532 }
533
534 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
535 {
536         struct btrfs_device *dev;
537         struct btrfs_fs_info *fs_info;
538         struct btrfs_path *path;
539         struct btrfs_key found_key;
540         struct extent_buffer *eb;
541         struct btrfs_extent_item *ei;
542         struct scrub_warning swarn;
543         unsigned long ptr = 0;
544         u64 extent_item_pos;
545         u64 flags = 0;
546         u64 ref_root;
547         u32 item_size;
548         u8 ref_level;
549         int ret;
550
551         WARN_ON(sblock->page_count < 1);
552         dev = sblock->pagev[0]->dev;
553         fs_info = sblock->sctx->dev_root->fs_info;
554
555         path = btrfs_alloc_path();
556         if (!path)
557                 return;
558
559         swarn.sector = (sblock->pagev[0]->physical) >> 9;
560         swarn.logical = sblock->pagev[0]->logical;
561         swarn.errstr = errstr;
562         swarn.dev = NULL;
563
564         ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
565                                   &flags);
566         if (ret < 0)
567                 goto out;
568
569         extent_item_pos = swarn.logical - found_key.objectid;
570         swarn.extent_item_size = found_key.offset;
571
572         eb = path->nodes[0];
573         ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
574         item_size = btrfs_item_size_nr(eb, path->slots[0]);
575
576         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
577                 do {
578                         ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
579                                                       item_size, &ref_root,
580                                                       &ref_level);
581                         printk_in_rcu(KERN_WARNING
582                                 "BTRFS: %s at logical %llu on dev %s, "
583                                 "sector %llu: metadata %s (level %d) in tree "
584                                 "%llu\n", errstr, swarn.logical,
585                                 rcu_str_deref(dev->name),
586                                 (unsigned long long)swarn.sector,
587                                 ref_level ? "node" : "leaf",
588                                 ret < 0 ? -1 : ref_level,
589                                 ret < 0 ? -1 : ref_root);
590                 } while (ret != 1);
591                 btrfs_release_path(path);
592         } else {
593                 btrfs_release_path(path);
594                 swarn.path = path;
595                 swarn.dev = dev;
596                 iterate_extent_inodes(fs_info, found_key.objectid,
597                                         extent_item_pos, 1,
598                                         scrub_print_warning_inode, &swarn);
599         }
600
601 out:
602         btrfs_free_path(path);
603 }
604
605 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
606 {
607         struct page *page = NULL;
608         unsigned long index;
609         struct scrub_fixup_nodatasum *fixup = fixup_ctx;
610         int ret;
611         int corrected = 0;
612         struct btrfs_key key;
613         struct inode *inode = NULL;
614         struct btrfs_fs_info *fs_info;
615         u64 end = offset + PAGE_SIZE - 1;
616         struct btrfs_root *local_root;
617         int srcu_index;
618
619         key.objectid = root;
620         key.type = BTRFS_ROOT_ITEM_KEY;
621         key.offset = (u64)-1;
622
623         fs_info = fixup->root->fs_info;
624         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
625
626         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
627         if (IS_ERR(local_root)) {
628                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
629                 return PTR_ERR(local_root);
630         }
631
632         key.type = BTRFS_INODE_ITEM_KEY;
633         key.objectid = inum;
634         key.offset = 0;
635         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
636         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
637         if (IS_ERR(inode))
638                 return PTR_ERR(inode);
639
640         index = offset >> PAGE_CACHE_SHIFT;
641
642         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
643         if (!page) {
644                 ret = -ENOMEM;
645                 goto out;
646         }
647
648         if (PageUptodate(page)) {
649                 if (PageDirty(page)) {
650                         /*
651                          * we need to write the data to the defective sector. the
652                          * data that was in that sector is not in memory,
653                          * because the page was modified. we must not write the
654                          * modified page to that sector.
655                          *
656                          * TODO: what could be done here: wait for the delalloc
657                          *       runner to write out that page (might involve
658                          *       COW) and see whether the sector is still
659                          *       referenced afterwards.
660                          *
661                          * For the time being, we'll treat this error as
662                          * uncorrectable, although there is a chance that a
663                          * later scrub will find the bad sector again and that
664                          * there will be no dirty page in memory by then.
665                          */
666                         ret = -EIO;
667                         goto out;
668                 }
669                 ret = repair_io_failure(inode, offset, PAGE_SIZE,
670                                         fixup->logical, page,
671                                         offset - page_offset(page),
672                                         fixup->mirror_num);
673                 unlock_page(page);
674                 corrected = !ret;
675         } else {
676                 /*
677                  * we need to get good data first. the general readpage path
678                  * will call repair_io_failure for us, we just have to make
679                  * sure we read the bad mirror.
680                  */
681                 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
682                                         EXTENT_DAMAGED, GFP_NOFS);
683                 if (ret) {
684                         /* set_extent_bits should give proper error */
685                         WARN_ON(ret > 0);
686                         if (ret > 0)
687                                 ret = -EFAULT;
688                         goto out;
689                 }
690
691                 ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
692                                                 btrfs_get_extent,
693                                                 fixup->mirror_num);
694                 wait_on_page_locked(page);
695
696                 corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
697                                                 end, EXTENT_DAMAGED, 0, NULL);
698                 if (!corrected)
699                         clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
700                                                 EXTENT_DAMAGED, GFP_NOFS);
701         }
702
703 out:
704         if (page)
705                 put_page(page);
706
707         iput(inode);
708
709         if (ret < 0)
710                 return ret;
711
712         if (ret == 0 && corrected) {
713                 /*
714                  * we only need to call readpage for one of the inodes belonging
715                  * to this extent. so make iterate_extent_inodes stop
716                  */
717                 return 1;
718         }
719
720         return -EIO;
721 }
722
723 static void scrub_fixup_nodatasum(struct btrfs_work *work)
724 {
725         int ret;
726         struct scrub_fixup_nodatasum *fixup;
727         struct scrub_ctx *sctx;
728         struct btrfs_trans_handle *trans = NULL;
729         struct btrfs_path *path;
730         int uncorrectable = 0;
731
732         fixup = container_of(work, struct scrub_fixup_nodatasum, work);
733         sctx = fixup->sctx;
734
735         path = btrfs_alloc_path();
736         if (!path) {
737                 spin_lock(&sctx->stat_lock);
738                 ++sctx->stat.malloc_errors;
739                 spin_unlock(&sctx->stat_lock);
740                 uncorrectable = 1;
741                 goto out;
742         }
743
744         trans = btrfs_join_transaction(fixup->root);
745         if (IS_ERR(trans)) {
746                 uncorrectable = 1;
747                 goto out;
748         }
749
750         /*
751          * the idea is to trigger a regular read through the standard path. we
752          * read a page from the (failed) logical address by specifying the
753          * corresponding copynum of the failed sector. thus, that readpage is
754          * expected to fail.
755          * that is the point where the on-the-fly error correction will kick
756          * in (once the read has finished) and rewrite the failed sector if a
757          * good copy can be found.
758          */
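        /*
         * Concretely, as far as can be seen from scrub_fixup_readpage()
         * above: the callback forces a read of the known-bad mirror, and
         * if the generic read path manages to repair the sector, the
         * callback returns 1, which also stops the inode iteration early.
         */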
759         ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
760                                                 path, scrub_fixup_readpage,
761                                                 fixup);
762         if (ret < 0) {
763                 uncorrectable = 1;
764                 goto out;
765         }
766         WARN_ON(ret != 1);
767
768         spin_lock(&sctx->stat_lock);
769         ++sctx->stat.corrected_errors;
770         spin_unlock(&sctx->stat_lock);
771
772 out:
773         if (trans && !IS_ERR(trans))
774                 btrfs_end_transaction(trans, fixup->root);
775         if (uncorrectable) {
776                 spin_lock(&sctx->stat_lock);
777                 ++sctx->stat.uncorrectable_errors;
778                 spin_unlock(&sctx->stat_lock);
779                 btrfs_dev_replace_stats_inc(
780                         &sctx->dev_root->fs_info->dev_replace.
781                         num_uncorrectable_read_errors);
782                 printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
783                     "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
784                         fixup->logical, rcu_str_deref(fixup->dev->name));
785         }
786
787         btrfs_free_path(path);
788         kfree(fixup);
789
790         scrub_pending_trans_workers_dec(sctx);
791 }
792
793 /*
794  * scrub_handle_errored_block gets called when either verification of the
795  * pages failed or the bio failed to read, e.g. with EIO. In the latter
796  * case, this function handles all pages in the bio, even though only one
797  * may be bad.
798  * The goal of this function is to repair the errored block by using the
799  * contents of one of the mirrors.
800  */
801 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
802 {
803         struct scrub_ctx *sctx = sblock_to_check->sctx;
804         struct btrfs_device *dev;
805         struct btrfs_fs_info *fs_info;
806         u64 length;
807         u64 logical;
808         u64 generation;
809         unsigned int failed_mirror_index;
810         unsigned int is_metadata;
811         unsigned int have_csum;
812         u8 *csum;
813         struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
814         struct scrub_block *sblock_bad;
815         int ret;
816         int mirror_index;
817         int page_num;
818         int success;
819         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
820                                       DEFAULT_RATELIMIT_BURST);
821
822         BUG_ON(sblock_to_check->page_count < 1);
823         fs_info = sctx->dev_root->fs_info;
824         if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
825                 /*
826                  * if we find an error in a super block, we just report it.
827                  * Super blocks will get rewritten with the next transaction
828                  * commit anyway.
829                  */
830                 spin_lock(&sctx->stat_lock);
831                 ++sctx->stat.super_errors;
832                 spin_unlock(&sctx->stat_lock);
833                 return 0;
834         }
835         length = sblock_to_check->page_count * PAGE_SIZE;
836         logical = sblock_to_check->pagev[0]->logical;
837         generation = sblock_to_check->pagev[0]->generation;
838         BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
839         failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
840         is_metadata = !(sblock_to_check->pagev[0]->flags &
841                         BTRFS_EXTENT_FLAG_DATA);
842         have_csum = sblock_to_check->pagev[0]->have_csum;
843         csum = sblock_to_check->pagev[0]->csum;
844         dev = sblock_to_check->pagev[0]->dev;
845
846         if (sctx->is_dev_replace && !is_metadata && !have_csum) {
847                 sblocks_for_recheck = NULL;
848                 goto nodatasum_case;
849         }
850
851         /*
852          * read all mirrors one after the other. This includes
853          * re-reading the extent or metadata block that failed (which is
854          * why this fixup code was called), this time page by page,
855          * in order to know which pages caused I/O errors and which
856          * ones are good (for all mirrors).
857          * The goal is to handle the situation when more than one
858          * mirror contains I/O errors, but the errors do not
859          * overlap, i.e. the data can be repaired by selecting the
860          * pages from those mirrors without I/O error on the
861          * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
862          * would be that mirror #1 has an I/O error on the first page,
863          * the second page is good, and mirror #2 has an I/O error on
864          * the second page, but the first page is good.
865          * Then the first page of the first mirror can be repaired by
866          * taking the first page of the second mirror, and the
867          * second page of the second mirror can be repaired by
868          * copying the contents of the 2nd page of the 1st mirror.
869          * One more note: if the pages of one mirror contain I/O
870          * errors, the checksum cannot be verified. In order to get
871          * the best data for repairing, the first attempt is to find
872          * a mirror without I/O errors and with a validated checksum.
873          * Only if this is not possible, the pages are picked from
874          * mirrors with I/O errors without considering the checksum.
875          * If the latter is the case, at the end, the checksum of the
876          * repaired area is verified in order to correctly maintain
877          * the statistics.
878          */
879
880         sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
881                                      sizeof(*sblocks_for_recheck),
882                                      GFP_NOFS);
883         if (!sblocks_for_recheck) {
884                 spin_lock(&sctx->stat_lock);
885                 sctx->stat.malloc_errors++;
886                 sctx->stat.read_errors++;
887                 sctx->stat.uncorrectable_errors++;
888                 spin_unlock(&sctx->stat_lock);
889                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
890                 goto out;
891         }
892
893         /* setup the context, map the logical blocks and alloc the pages */
894         ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
895                                         logical, sblocks_for_recheck);
896         if (ret) {
897                 spin_lock(&sctx->stat_lock);
898                 sctx->stat.read_errors++;
899                 sctx->stat.uncorrectable_errors++;
900                 spin_unlock(&sctx->stat_lock);
901                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
902                 goto out;
903         }
904         BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
905         sblock_bad = sblocks_for_recheck + failed_mirror_index;
906
907         /* build and submit the bios for the failed mirror, check checksums */
908         scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
909                             csum, generation, sctx->csum_size);
910
911         if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
912             sblock_bad->no_io_error_seen) {
913                 /*
914                  * the error disappeared after reading page by page, or
915                  * the area was part of a huge bio and other parts of the
916                  * bio caused I/O errors, or the block layer merged several
917                  * read requests into one and the error is caused by a
918                  * different bio (usually one of the latter two cases is
919                  * the cause)
920                  */
921                 spin_lock(&sctx->stat_lock);
922                 sctx->stat.unverified_errors++;
923                 spin_unlock(&sctx->stat_lock);
924
925                 if (sctx->is_dev_replace)
926                         scrub_write_block_to_dev_replace(sblock_bad);
927                 goto out;
928         }
929
930         if (!sblock_bad->no_io_error_seen) {
931                 spin_lock(&sctx->stat_lock);
932                 sctx->stat.read_errors++;
933                 spin_unlock(&sctx->stat_lock);
934                 if (__ratelimit(&_rs))
935                         scrub_print_warning("i/o error", sblock_to_check);
936                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
937         } else if (sblock_bad->checksum_error) {
938                 spin_lock(&sctx->stat_lock);
939                 sctx->stat.csum_errors++;
940                 spin_unlock(&sctx->stat_lock);
941                 if (__ratelimit(&_rs))
942                         scrub_print_warning("checksum error", sblock_to_check);
943                 btrfs_dev_stat_inc_and_print(dev,
944                                              BTRFS_DEV_STAT_CORRUPTION_ERRS);
945         } else if (sblock_bad->header_error) {
946                 spin_lock(&sctx->stat_lock);
947                 sctx->stat.verify_errors++;
948                 spin_unlock(&sctx->stat_lock);
949                 if (__ratelimit(&_rs))
950                         scrub_print_warning("checksum/header error",
951                                             sblock_to_check);
952                 if (sblock_bad->generation_error)
953                         btrfs_dev_stat_inc_and_print(dev,
954                                 BTRFS_DEV_STAT_GENERATION_ERRS);
955                 else
956                         btrfs_dev_stat_inc_and_print(dev,
957                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
958         }
959
960         if (sctx->readonly) {
961                 ASSERT(!sctx->is_dev_replace);
962                 goto out;
963         }
964
965         if (!is_metadata && !have_csum) {
966                 struct scrub_fixup_nodatasum *fixup_nodatasum;
967
968 nodatasum_case:
969                 WARN_ON(sctx->is_dev_replace);
970
971                 /*
972                  * !is_metadata and !have_csum: this means that the data
973                  * might not be COW'ed and that it might be modified
974                  * concurrently. The general strategy of working on the
975                  * commit root does not help in the case when COW is not
976                  * used.
977                  */
978                 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
979                 if (!fixup_nodatasum)
980                         goto did_not_correct_error;
981                 fixup_nodatasum->sctx = sctx;
982                 fixup_nodatasum->dev = dev;
983                 fixup_nodatasum->logical = logical;
984                 fixup_nodatasum->root = fs_info->extent_root;
985                 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
986                 scrub_pending_trans_workers_inc(sctx);
987                 btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
988                                 scrub_fixup_nodatasum, NULL, NULL);
989                 btrfs_queue_work(fs_info->scrub_workers,
990                                  &fixup_nodatasum->work);
991                 goto out;
992         }
993
994         /*
995          * now build and submit the bios for the other mirrors, check
996          * checksums.
997          * First try to pick the mirror which is completely without I/O
998          * errors and also does not have a checksum error.
999          * If one is found, and if a checksum is present, the full block
1000          * that is known to contain an error is rewritten. Afterwards
1001          * the block is known to be corrected.
1002          * If a mirror is found which is completely correct, and no
1003          * checksum is present, only those pages are rewritten that had
1004          * an I/O error in the block to be repaired, since it cannot be
1005          * determined, which copy of the other pages is better (and it
1006          * could happen otherwise that a correct page would be
1007          * overwritten by a bad one).
1008          */
1009         for (mirror_index = 0;
1010              mirror_index < BTRFS_MAX_MIRRORS &&
1011              sblocks_for_recheck[mirror_index].page_count > 0;
1012              mirror_index++) {
1013                 struct scrub_block *sblock_other;
1014
1015                 if (mirror_index == failed_mirror_index)
1016                         continue;
1017                 sblock_other = sblocks_for_recheck + mirror_index;
1018
1019                 /* build and submit the bios, check checksums */
1020                 scrub_recheck_block(fs_info, sblock_other, is_metadata,
1021                                     have_csum, csum, generation,
1022                                     sctx->csum_size);
1023
1024                 if (!sblock_other->header_error &&
1025                     !sblock_other->checksum_error &&
1026                     sblock_other->no_io_error_seen) {
1027                         if (sctx->is_dev_replace) {
1028                                 scrub_write_block_to_dev_replace(sblock_other);
1029                         } else {
1030                                 int force_write = is_metadata || have_csum;
1031
1032                                 ret = scrub_repair_block_from_good_copy(
1033                                                 sblock_bad, sblock_other,
1034                                                 force_write);
1035                         }
1036                         if (0 == ret)
1037                                 goto corrected_error;
1038                 }
1039         }
1040
1041         /*
1042          * for dev_replace, pick good pages and write to the target device.
1043          */
1044         if (sctx->is_dev_replace) {
1045                 success = 1;
1046                 for (page_num = 0; page_num < sblock_bad->page_count;
1047                      page_num++) {
1048                         int sub_success;
1049
1050                         sub_success = 0;
1051                         for (mirror_index = 0;
1052                              mirror_index < BTRFS_MAX_MIRRORS &&
1053                              sblocks_for_recheck[mirror_index].page_count > 0;
1054                              mirror_index++) {
1055                                 struct scrub_block *sblock_other =
1056                                         sblocks_for_recheck + mirror_index;
1057                                 struct scrub_page *page_other =
1058                                         sblock_other->pagev[page_num];
1059
1060                                 if (!page_other->io_error) {
1061                                         ret = scrub_write_page_to_dev_replace(
1062                                                         sblock_other, page_num);
1063                                         if (ret == 0) {
1064                                                 /* succeeded for this page */
1065                                                 sub_success = 1;
1066                                                 break;
1067                                         } else {
1068                                                 btrfs_dev_replace_stats_inc(
1069                                                         &sctx->dev_root->
1070                                                         fs_info->dev_replace.
1071                                                         num_write_errors);
1072                                         }
1073                                 }
1074                         }
1075
1076                         if (!sub_success) {
1077                                 /*
1078                                  * did not find a mirror to fetch the page
1079                                  * from. scrub_write_page_to_dev_replace()
1080                                  * handles this case (page->io_error), by
1081                                  * filling the block with zeros before
1082                                  * submitting the write request
1083                                  */
1084                                 success = 0;
1085                                 ret = scrub_write_page_to_dev_replace(
1086                                                 sblock_bad, page_num);
1087                                 if (ret)
1088                                         btrfs_dev_replace_stats_inc(
1089                                                 &sctx->dev_root->fs_info->
1090                                                 dev_replace.num_write_errors);
1091                         }
1092                 }
1093
1094                 goto out;
1095         }
1096
1097         /*
1098          * for regular scrub, repair those pages that are errored.
1099          * In case of I/O errors in the area that is supposed to be
1100          * repaired, continue by picking good copies of those pages.
1101          * Select the good pages from mirrors to rewrite bad pages from
1102          * the area to fix. Afterwards verify the checksum of the block
1103          * that is supposed to be repaired. This verification step is
1104          * only done for the purpose of statistics counting and for the
1105          * final scrub report on whether errors remain.
1106          * A perfect algorithm could make use of the checksum and try
1107          * all possible combinations of pages from the different mirrors
1108          * until the checksum verification succeeds. For example, when
1109          * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1110          * of mirror #2 is readable but the final checksum test fails,
1111          * then the 2nd page of mirror #3 could be tried, to see whether
1112          * the final checksum now succeeds. But this would be a rare
1113          * exception and is therefore not implemented. At least it is
1114          * avoided that the good copy is overwritten.
1115          * A more useful improvement would be to pick the sectors
1116          * without I/O error based on sector sizes (512 bytes on legacy
1117          * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1118          * mirror could be repaired by taking 512 bytes of a different
1119          * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1120          * area are unreadable.
1121          */
1122
1123         /* can only fix I/O errors from here on */
1124         if (sblock_bad->no_io_error_seen)
1125                 goto did_not_correct_error;
1126
1127         success = 1;
1128         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1129                 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1130
1131                 if (!page_bad->io_error)
1132                         continue;
1133
1134                 for (mirror_index = 0;
1135                      mirror_index < BTRFS_MAX_MIRRORS &&
1136                      sblocks_for_recheck[mirror_index].page_count > 0;
1137                      mirror_index++) {
1138                         struct scrub_block *sblock_other = sblocks_for_recheck +
1139                                                            mirror_index;
1140                         struct scrub_page *page_other = sblock_other->pagev[
1141                                                         page_num];
1142
1143                         if (!page_other->io_error) {
1144                                 ret = scrub_repair_page_from_good_copy(
1145                                         sblock_bad, sblock_other, page_num, 0);
1146                                 if (0 == ret) {
1147                                         page_bad->io_error = 0;
1148                                         break; /* succeeded for this page */
1149                                 }
1150                         }
1151                 }
1152
1153                 if (page_bad->io_error) {
1154                         /* did not find a mirror to copy the page from */
1155                         success = 0;
1156                 }
1157         }
1158
1159         if (success) {
1160                 if (is_metadata || have_csum) {
1161                         /*
1162                          * need to verify the checksum now that all
1163                          * sectors on disk are repaired (the write
1164                          * request for data to be repaired is on its way).
1165                          * Just be lazy and use scrub_recheck_block()
1166                          * which re-reads the data before the checksum
1167                          * is verified, but most likely the data comes out
1168                          * of the page cache.
1169                          */
1170                         scrub_recheck_block(fs_info, sblock_bad,
1171                                             is_metadata, have_csum, csum,
1172                                             generation, sctx->csum_size);
1173                         if (!sblock_bad->header_error &&
1174                             !sblock_bad->checksum_error &&
1175                             sblock_bad->no_io_error_seen)
1176                                 goto corrected_error;
1177                         else
1178                                 goto did_not_correct_error;
1179                 } else {
1180 corrected_error:
1181                         spin_lock(&sctx->stat_lock);
1182                         sctx->stat.corrected_errors++;
1183                         spin_unlock(&sctx->stat_lock);
1184                         printk_ratelimited_in_rcu(KERN_ERR
1185                                 "BTRFS: fixed up error at logical %llu on dev %s\n",
1186                                 logical, rcu_str_deref(dev->name));
1187                 }
1188         } else {
1189 did_not_correct_error:
1190                 spin_lock(&sctx->stat_lock);
1191                 sctx->stat.uncorrectable_errors++;
1192                 spin_unlock(&sctx->stat_lock);
1193                 printk_ratelimited_in_rcu(KERN_ERR
1194                         "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
1195                         logical, rcu_str_deref(dev->name));
1196         }
1197
1198 out:
1199         if (sblocks_for_recheck) {
1200                 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1201                      mirror_index++) {
1202                         struct scrub_block *sblock = sblocks_for_recheck +
1203                                                      mirror_index;
1204                         int page_index;
1205
1206                         for (page_index = 0; page_index < sblock->page_count;
1207                              page_index++) {
1208                                 sblock->pagev[page_index]->sblock = NULL;
1209                                 scrub_page_put(sblock->pagev[page_index]);
1210                         }
1211                 }
1212                 kfree(sblocks_for_recheck);
1213         }
1214
1215         return 0;
1216 }
1217
1218 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
1219                                      struct btrfs_fs_info *fs_info,
1220                                      struct scrub_block *original_sblock,
1221                                      u64 length, u64 logical,
1222                                      struct scrub_block *sblocks_for_recheck)
1223 {
1224         int page_index;
1225         int mirror_index;
1226         int ret;
1227
1228         /*
1229          * note: the two members ref_count and outstanding_pages
1230          * are not used (and not set) in the blocks that are used for
1231          * the recheck procedure
1232          */
1233
1234         page_index = 0;
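        /*
         * Illustrative walk-through with hypothetical numbers: for a
         * 16 KiB block with two mirrors (and 4 KiB pages), the loop below
         * runs four times, each btrfs_map_block() call returns two
         * stripes, and page_index p of mirror m ends up describing
         * logical + p * PAGE_SIZE on that mirror.
         */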
1235         while (length > 0) {
1236                 u64 sublen = min_t(u64, length, PAGE_SIZE);
1237                 u64 mapped_length = sublen;
1238                 struct btrfs_bio *bbio = NULL;
1239
1240                 /*
1241                  * with a length of PAGE_SIZE, each returned stripe
1242                  * represents one mirror
1243                  */
1244                 ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
1245                                       &mapped_length, &bbio, 0);
1246                 if (ret || !bbio || mapped_length < sublen) {
1247                         kfree(bbio);
1248                         return -EIO;
1249                 }
1250
1251                 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
1252                 for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
1253                      mirror_index++) {
1254                         struct scrub_block *sblock;
1255                         struct scrub_page *page;
1256
1257                         if (mirror_index >= BTRFS_MAX_MIRRORS)
1258                                 continue;
1259
1260                         sblock = sblocks_for_recheck + mirror_index;
1261                         sblock->sctx = sctx;
1262                         page = kzalloc(sizeof(*page), GFP_NOFS);
1263                         if (!page) {
1264 leave_nomem:
1265                                 spin_lock(&sctx->stat_lock);
1266                                 sctx->stat.malloc_errors++;
1267                                 spin_unlock(&sctx->stat_lock);
1268                                 kfree(bbio);
1269                                 return -ENOMEM;
1270                         }
1271                         scrub_page_get(page);
1272                         sblock->pagev[page_index] = page;
1273                         page->logical = logical;
1274                         page->physical = bbio->stripes[mirror_index].physical;
1275                         BUG_ON(page_index >= original_sblock->page_count);
1276                         page->physical_for_dev_replace =
1277                                 original_sblock->pagev[page_index]->
1278                                 physical_for_dev_replace;
1279                         /* for missing devices, dev->bdev is NULL */
1280                         page->dev = bbio->stripes[mirror_index].dev;
1281                         page->mirror_num = mirror_index + 1;
1282                         sblock->page_count++;
1283                         page->page = alloc_page(GFP_NOFS);
1284                         if (!page->page)
1285                                 goto leave_nomem;
1286                 }
1287                 kfree(bbio);
1288                 length -= sublen;
1289                 logical += sublen;
1290                 page_index++;
1291         }
1292
1293         return 0;
1294 }
1295
1296 /*
1297  * this function will check the on-disk data for checksum errors, header
1298  * errors and read I/O errors. If any I/O errors happen, the exact pages
1299  * that had errors are marked as bad. The goal is to enable scrub to take
1300  * the non-errored pages from all mirrors, so that the pages that are
1301  * errored in the mirror just handled can be repaired.
1302  */
1303 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1304                                 struct scrub_block *sblock, int is_metadata,
1305                                 int have_csum, u8 *csum, u64 generation,
1306                                 u16 csum_size)
1307 {
1308         int page_num;
1309
1310         sblock->no_io_error_seen = 1;
1311         sblock->header_error = 0;
1312         sblock->checksum_error = 0;
1313
1314         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1315                 struct bio *bio;
1316                 struct scrub_page *page = sblock->pagev[page_num];
1317
1318                 if (page->dev->bdev == NULL) {
1319                         page->io_error = 1;
1320                         sblock->no_io_error_seen = 0;
1321                         continue;
1322                 }
1323
1324                 WARN_ON(!page->page);
1325                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1326                 if (!bio) {
1327                         page->io_error = 1;
1328                         sblock->no_io_error_seen = 0;
1329                         continue;
1330                 }
1331                 bio->bi_bdev = page->dev->bdev;
1332                 bio->bi_iter.bi_sector = page->physical >> 9;
1333
1334                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1335                 if (btrfsic_submit_bio_wait(READ, bio))
1336                         sblock->no_io_error_seen = 0;
1337
1338                 bio_put(bio);
1339         }
1340
1341         if (sblock->no_io_error_seen)
1342                 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1343                                              have_csum, csum, generation,
1344                                              csum_size);
1345
1346         return;
1347 }
1348
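/*
 * Compare the given fsid with the fsid of the page's fs_devices.
 * Returns 1 if they match, 0 otherwise.
 */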
1349 static inline int scrub_check_fsid(u8 fsid[],
1350                                    struct scrub_page *spage)
1351 {
1352         struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1353         int ret;
1354
1355         ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1356         return !ret;
1357 }
1358
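/*
 * Re-verify a block that was read without I/O errors. For metadata, the
 * bytenr, fsid, chunk tree uuid and generation of the on-disk header are
 * validated and the csum stored in the header is used; for data, the csum
 * passed in is used (and nothing is checked when no csum is available).
 * The crc is computed over all pages of the block (skipping the csum area
 * of the first page for metadata) and header_error, generation_error and
 * checksum_error are set on the scrub_block accordingly.
 */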
1359 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1360                                          struct scrub_block *sblock,
1361                                          int is_metadata, int have_csum,
1362                                          const u8 *csum, u64 generation,
1363                                          u16 csum_size)
1364 {
1365         int page_num;
1366         u8 calculated_csum[BTRFS_CSUM_SIZE];
1367         u32 crc = ~(u32)0;
1368         void *mapped_buffer;
1369
1370         WARN_ON(!sblock->pagev[0]->page);
1371         if (is_metadata) {
1372                 struct btrfs_header *h;
1373
1374                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1375                 h = (struct btrfs_header *)mapped_buffer;
1376
1377                 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
1378                     !scrub_check_fsid(h->fsid, sblock->pagev[0]) ||
1379                     memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1380                            BTRFS_UUID_SIZE)) {
1381                         sblock->header_error = 1;
1382                 } else if (generation != btrfs_stack_header_generation(h)) {
1383                         sblock->header_error = 1;
1384                         sblock->generation_error = 1;
1385                 }
1386                 csum = h->csum;
1387         } else {
1388                 if (!have_csum)
1389                         return;
1390
1391                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1392         }
1393
1394         for (page_num = 0;;) {
1395                 if (page_num == 0 && is_metadata)
1396                         crc = btrfs_csum_data(
1397                                 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1398                                 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1399                 else
1400                         crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
1401
1402                 kunmap_atomic(mapped_buffer);
1403                 page_num++;
1404                 if (page_num >= sblock->page_count)
1405                         break;
1406                 WARN_ON(!sblock->pagev[page_num]->page);
1407
1408                 mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
1409         }
1410
1411         btrfs_csum_final(crc, calculated_csum);
1412         if (memcmp(calculated_csum, csum, csum_size))
1413                 sblock->checksum_error = 1;
1414 }
1415
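/*
 * Rewrite the bad mirror from the good one, page by page. With force_write
 * set every page is written; otherwise only pages of blocks with header or
 * checksum errors, or pages with individual I/O errors, are rewritten.
 * Returns non-zero if any single page repair failed.
 */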
1416 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1417                                              struct scrub_block *sblock_good,
1418                                              int force_write)
1419 {
1420         int page_num;
1421         int ret = 0;
1422
1423         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1424                 int ret_sub;
1425
1426                 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1427                                                            sblock_good,
1428                                                            page_num,
1429                                                            force_write);
1430                 if (ret_sub)
1431                         ret = ret_sub;
1432         }
1433
1434         return ret;
1435 }
1436
1437 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1438                                             struct scrub_block *sblock_good,
1439                                             int page_num, int force_write)
1440 {
1441         struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1442         struct scrub_page *page_good = sblock_good->pagev[page_num];
1443
1444         BUG_ON(page_bad->page == NULL);
1445         BUG_ON(page_good->page == NULL);
1446         if (force_write || sblock_bad->header_error ||
1447             sblock_bad->checksum_error || page_bad->io_error) {
1448                 struct bio *bio;
1449                 int ret;
1450
1451                 if (!page_bad->dev->bdev) {
1452                         printk_ratelimited(KERN_WARNING "BTRFS: "
1453                                 "scrub_repair_page_from_good_copy(bdev == NULL) "
1454                                 "is unexpected!\n");
1455                         return -EIO;
1456                 }
1457
1458                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1459                 if (!bio)
1460                         return -EIO;
1461                 bio->bi_bdev = page_bad->dev->bdev;
1462                 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1463
1464                 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1465                 if (PAGE_SIZE != ret) {
1466                         bio_put(bio);
1467                         return -EIO;
1468                 }
1469
1470                 if (btrfsic_submit_bio_wait(WRITE, bio)) {
1471                         btrfs_dev_stat_inc_and_print(page_bad->dev,
1472                                 BTRFS_DEV_STAT_WRITE_ERRS);
1473                         btrfs_dev_replace_stats_inc(
1474                                 &sblock_bad->sctx->dev_root->fs_info->
1475                                 dev_replace.num_write_errors);
1476                         bio_put(bio);
1477                         return -EIO;
1478                 }
1479                 bio_put(bio);
1480         }
1481
1482         return 0;
1483 }
1484
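/*
 * Queue all pages of this block for writing to the dev-replace target
 * device; a page that cannot be queued only bumps the dev_replace write
 * error counter.
 */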
1485 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1486 {
1487         int page_num;
1488
1489         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1490                 int ret;
1491
1492                 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1493                 if (ret)
1494                         btrfs_dev_replace_stats_inc(
1495                                 &sblock->sctx->dev_root->fs_info->dev_replace.
1496                                 num_write_errors);
1497         }
1498 }
1499
1500 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1501                                            int page_num)
1502 {
1503         struct scrub_page *spage = sblock->pagev[page_num];
1504
1505         BUG_ON(spage->page == NULL);
1506         if (spage->io_error) {
1507                 void *mapped_buffer = kmap_atomic(spage->page);
1508
1509                 memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
1510                 flush_dcache_page(spage->page);
1511                 kunmap_atomic(mapped_buffer);
1512         }
1513         return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1514 }
1515
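/*
 * Append one page to the current write bio for the dev-replace target.
 * A fresh scrub_bio/bio is set up when none is pending; the pending bio
 * is submitted first when the page is not physically or logically
 * contiguous with it, when bio_add_page() refuses the page, or once
 * pages_per_wr_bio pages have been collected.
 */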
1516 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1517                                     struct scrub_page *spage)
1518 {
1519         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1520         struct scrub_bio *sbio;
1521         int ret;
1522
1523         mutex_lock(&wr_ctx->wr_lock);
1524 again:
1525         if (!wr_ctx->wr_curr_bio) {
1526                 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1527                                               GFP_NOFS);
1528                 if (!wr_ctx->wr_curr_bio) {
1529                         mutex_unlock(&wr_ctx->wr_lock);
1530                         return -ENOMEM;
1531                 }
1532                 wr_ctx->wr_curr_bio->sctx = sctx;
1533                 wr_ctx->wr_curr_bio->page_count = 0;
1534         }
1535         sbio = wr_ctx->wr_curr_bio;
1536         if (sbio->page_count == 0) {
1537                 struct bio *bio;
1538
1539                 sbio->physical = spage->physical_for_dev_replace;
1540                 sbio->logical = spage->logical;
1541                 sbio->dev = wr_ctx->tgtdev;
1542                 bio = sbio->bio;
1543                 if (!bio) {
1544                         bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
1545                         if (!bio) {
1546                                 mutex_unlock(&wr_ctx->wr_lock);
1547                                 return -ENOMEM;
1548                         }
1549                         sbio->bio = bio;
1550                 }
1551
1552                 bio->bi_private = sbio;
1553                 bio->bi_end_io = scrub_wr_bio_end_io;
1554                 bio->bi_bdev = sbio->dev->bdev;
1555                 bio->bi_iter.bi_sector = sbio->physical >> 9;
1556                 sbio->err = 0;
1557         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1558                    spage->physical_for_dev_replace ||
1559                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1560                    spage->logical) {
1561                 scrub_wr_submit(sctx);
1562                 goto again;
1563         }
1564
1565         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1566         if (ret != PAGE_SIZE) {
1567                 if (sbio->page_count < 1) {
1568                         bio_put(sbio->bio);
1569                         sbio->bio = NULL;
1570                         mutex_unlock(&wr_ctx->wr_lock);
1571                         return -EIO;
1572                 }
1573                 scrub_wr_submit(sctx);
1574                 goto again;
1575         }
1576
1577         sbio->pagev[sbio->page_count] = spage;
1578         scrub_page_get(spage);
1579         sbio->page_count++;
1580         if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1581                 scrub_wr_submit(sctx);
1582         mutex_unlock(&wr_ctx->wr_lock);
1583
1584         return 0;
1585 }
1586
1587 static void scrub_wr_submit(struct scrub_ctx *sctx)
1588 {
1589         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1590         struct scrub_bio *sbio;
1591
1592         if (!wr_ctx->wr_curr_bio)
1593                 return;
1594
1595         sbio = wr_ctx->wr_curr_bio;
1596         wr_ctx->wr_curr_bio = NULL;
1597         WARN_ON(!sbio->bio->bi_bdev);
1598         scrub_pending_bio_inc(sctx);
1599         /* process all writes in a single worker thread, so that the block
1600          * layer can order the requests before sending them to the driver;
1601          * this doubled the write performance on spinning disks when
1602          * measured with Linux 3.5 */
1603         btrfsic_submit_bio(WRITE, sbio->bio);
1604 }
1605
1606 static void scrub_wr_bio_end_io(struct bio *bio, int err)
1607 {
1608         struct scrub_bio *sbio = bio->bi_private;
1609         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1610
1611         sbio->err = err;
1612         sbio->bio = bio;
1613
1614         btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1615                          scrub_wr_bio_end_io_worker, NULL, NULL);
1616         btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1617 }
1618
1619 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1620 {
1621         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1622         struct scrub_ctx *sctx = sbio->sctx;
1623         int i;
1624
1625         WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1626         if (sbio->err) {
1627                 struct btrfs_dev_replace *dev_replace =
1628                         &sbio->sctx->dev_root->fs_info->dev_replace;
1629
1630                 for (i = 0; i < sbio->page_count; i++) {
1631                         struct scrub_page *spage = sbio->pagev[i];
1632
1633                         spage->io_error = 1;
1634                         btrfs_dev_replace_stats_inc(&dev_replace->
1635                                                     num_write_errors);
1636                 }
1637         }
1638
1639         for (i = 0; i < sbio->page_count; i++)
1640                 scrub_page_put(sbio->pagev[i]);
1641
1642         bio_put(sbio->bio);
1643         kfree(sbio);
1644         scrub_pending_bio_dec(sctx);
1645 }
1646
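/*
 * Dispatch the checksum verification based on the extent flags of the first
 * page. Failed data and tree blocks are handed to
 * scrub_handle_errored_block(); super block errors are only accounted inside
 * scrub_checksum_super(), its return value is ignored here.
 */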
1647 static int scrub_checksum(struct scrub_block *sblock)
1648 {
1649         u64 flags;
1650         int ret;
1651
1652         WARN_ON(sblock->page_count < 1);
1653         flags = sblock->pagev[0]->flags;
1654         ret = 0;
1655         if (flags & BTRFS_EXTENT_FLAG_DATA)
1656                 ret = scrub_checksum_data(sblock);
1657         else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1658                 ret = scrub_checksum_tree_block(sblock);
1659         else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1660                 (void)scrub_checksum_super(sblock);
1661         else
1662                 WARN_ON(1);
1663         if (ret)
1664                 scrub_handle_errored_block(sblock);
1665
1666         return ret;
1667 }
1668
1669 static int scrub_checksum_data(struct scrub_block *sblock)
1670 {
1671         struct scrub_ctx *sctx = sblock->sctx;
1672         u8 csum[BTRFS_CSUM_SIZE];
1673         u8 *on_disk_csum;
1674         struct page *page;
1675         void *buffer;
1676         u32 crc = ~(u32)0;
1677         int fail = 0;
1678         u64 len;
1679         int index;
1680
1681         BUG_ON(sblock->page_count < 1);
1682         if (!sblock->pagev[0]->have_csum)
1683                 return 0;
1684
1685         on_disk_csum = sblock->pagev[0]->csum;
1686         page = sblock->pagev[0]->page;
1687         buffer = kmap_atomic(page);
1688
1689         len = sctx->sectorsize;
1690         index = 0;
1691         for (;;) {
1692                 u64 l = min_t(u64, len, PAGE_SIZE);
1693
1694                 crc = btrfs_csum_data(buffer, crc, l);
1695                 kunmap_atomic(buffer);
1696                 len -= l;
1697                 if (len == 0)
1698                         break;
1699                 index++;
1700                 BUG_ON(index >= sblock->page_count);
1701                 BUG_ON(!sblock->pagev[index]->page);
1702                 page = sblock->pagev[index]->page;
1703                 buffer = kmap_atomic(page);
1704         }
1705
1706         btrfs_csum_final(crc, csum);
1707         if (memcmp(csum, on_disk_csum, sctx->csum_size))
1708                 fail = 1;
1709
1710         return fail;
1711 }
1712
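/*
 * Verify a tree block in place: check bytenr, generation, fsid and chunk
 * tree uuid against the on-disk header, then recompute the crc over
 * nodesize - BTRFS_CSUM_SIZE bytes and compare it with the stored csum.
 * Returns non-zero if any of these checks failed.
 */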
1713 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1714 {
1715         struct scrub_ctx *sctx = sblock->sctx;
1716         struct btrfs_header *h;
1717         struct btrfs_root *root = sctx->dev_root;
1718         struct btrfs_fs_info *fs_info = root->fs_info;
1719         u8 calculated_csum[BTRFS_CSUM_SIZE];
1720         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1721         struct page *page;
1722         void *mapped_buffer;
1723         u64 mapped_size;
1724         void *p;
1725         u32 crc = ~(u32)0;
1726         int fail = 0;
1727         int crc_fail = 0;
1728         u64 len;
1729         int index;
1730
1731         BUG_ON(sblock->page_count < 1);
1732         page = sblock->pagev[0]->page;
1733         mapped_buffer = kmap_atomic(page);
1734         h = (struct btrfs_header *)mapped_buffer;
1735         memcpy(on_disk_csum, h->csum, sctx->csum_size);
1736
1737         /*
1738          * we don't use the getter functions here, as we
1739          * a) don't have an extent buffer and
1740          * b) the page is already kmapped
1741          */
1742
1743         if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1744                 ++fail;
1745
1746         if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
1747                 ++fail;
1748
1749         if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
1750                 ++fail;
1751
1752         if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1753                    BTRFS_UUID_SIZE))
1754                 ++fail;
1755
1756         len = sctx->nodesize - BTRFS_CSUM_SIZE;
1757         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1758         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1759         index = 0;
1760         for (;;) {
1761                 u64 l = min_t(u64, len, mapped_size);
1762
1763                 crc = btrfs_csum_data(p, crc, l);
1764                 kunmap_atomic(mapped_buffer);
1765                 len -= l;
1766                 if (len == 0)
1767                         break;
1768                 index++;
1769                 BUG_ON(index >= sblock->page_count);
1770                 BUG_ON(!sblock->pagev[index]->page);
1771                 page = sblock->pagev[index]->page;
1772                 mapped_buffer = kmap_atomic(page);
1773                 mapped_size = PAGE_SIZE;
1774                 p = mapped_buffer;
1775         }
1776
1777         btrfs_csum_final(crc, calculated_csum);
1778         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1779                 ++crc_fail;
1780
1781         return fail || crc_fail;
1782 }
1783
1784 static int scrub_checksum_super(struct scrub_block *sblock)
1785 {
1786         struct btrfs_super_block *s;
1787         struct scrub_ctx *sctx = sblock->sctx;
1788         u8 calculated_csum[BTRFS_CSUM_SIZE];
1789         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1790         struct page *page;
1791         void *mapped_buffer;
1792         u64 mapped_size;
1793         void *p;
1794         u32 crc = ~(u32)0;
1795         int fail_gen = 0;
1796         int fail_cor = 0;
1797         u64 len;
1798         int index;
1799
1800         BUG_ON(sblock->page_count < 1);
1801         page = sblock->pagev[0]->page;
1802         mapped_buffer = kmap_atomic(page);
1803         s = (struct btrfs_super_block *)mapped_buffer;
1804         memcpy(on_disk_csum, s->csum, sctx->csum_size);
1805
1806         if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
1807                 ++fail_cor;
1808
1809         if (sblock->pagev[0]->generation != btrfs_super_generation(s))
1810                 ++fail_gen;
1811
1812         if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
1813                 ++fail_cor;
1814
1815         len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1816         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1817         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1818         index = 0;
1819         for (;;) {
1820                 u64 l = min_t(u64, len, mapped_size);
1821
1822                 crc = btrfs_csum_data(p, crc, l);
1823                 kunmap_atomic(mapped_buffer);
1824                 len -= l;
1825                 if (len == 0)
1826                         break;
1827                 index++;
1828                 BUG_ON(index >= sblock->page_count);
1829                 BUG_ON(!sblock->pagev[index]->page);
1830                 page = sblock->pagev[index]->page;
1831                 mapped_buffer = kmap_atomic(page);
1832                 mapped_size = PAGE_SIZE;
1833                 p = mapped_buffer;
1834         }
1835
1836         btrfs_csum_final(crc, calculated_csum);
1837         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1838                 ++fail_cor;
1839
1840         if (fail_cor + fail_gen) {
1841                 /*
1842                  * if we find an error in a super block, we just report it.
1843                  * Super blocks get rewritten with the next transaction
1844                  * commit anyway.
1845                  */
1846                 spin_lock(&sctx->stat_lock);
1847                 ++sctx->stat.super_errors;
1848                 spin_unlock(&sctx->stat_lock);
1849                 if (fail_cor)
1850                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1851                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1852                 else
1853                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1854                                 BTRFS_DEV_STAT_GENERATION_ERRS);
1855         }
1856
1857         return fail_cor + fail_gen;
1858 }
1859
1860 static void scrub_block_get(struct scrub_block *sblock)
1861 {
1862         atomic_inc(&sblock->ref_count);
1863 }
1864
1865 static void scrub_block_put(struct scrub_block *sblock)
1866 {
1867         if (atomic_dec_and_test(&sblock->ref_count)) {
1868                 int i;
1869
1870                 for (i = 0; i < sblock->page_count; i++)
1871                         scrub_page_put(sblock->pagev[i]);
1872                 kfree(sblock);
1873         }
1874 }
1875
1876 static void scrub_page_get(struct scrub_page *spage)
1877 {
1878         atomic_inc(&spage->ref_count);
1879 }
1880
1881 static void scrub_page_put(struct scrub_page *spage)
1882 {
1883         if (atomic_dec_and_test(&spage->ref_count)) {
1884                 if (spage->page)
1885                         __free_page(spage->page);
1886                 kfree(spage);
1887         }
1888 }
1889
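/*
 * Submit the currently assembled read bio, if any, and account it as in
 * flight. A bio without a bdev (which should not occur, see the comment
 * below) is completed immediately with -EIO.
 */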
1890 static void scrub_submit(struct scrub_ctx *sctx)
1891 {
1892         struct scrub_bio *sbio;
1893
1894         if (sctx->curr == -1)
1895                 return;
1896
1897         sbio = sctx->bios[sctx->curr];
1898         sctx->curr = -1;
1899         scrub_pending_bio_inc(sctx);
1900
1901         if (!sbio->bio->bi_bdev) {
1902                 /*
1903                  * this case should not happen. If btrfs_map_block() is
1904                  * wrong, it could happen for dev-replace operations on
1905                  * missing devices when no mirrors are available, but in
1906                  * that case the mount should already have failed.
1907                  * The case is still handled correctly here (just _very_ slowly).
1908                  */
1909                 printk_ratelimited(KERN_WARNING
1910                         "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
1911                 bio_endio(sbio->bio, -EIO);
1912         } else {
1913                 btrfsic_submit_bio(READ, sbio->bio);
1914         }
1915 }
1916
1917 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
1918                                     struct scrub_page *spage)
1919 {
1920         struct scrub_block *sblock = spage->sblock;
1921         struct scrub_bio *sbio;
1922         int ret;
1923
1924 again:
1925         /*
1926          * grab a fresh bio or wait for one to become available
1927          */
1928         while (sctx->curr == -1) {
1929                 spin_lock(&sctx->list_lock);
1930                 sctx->curr = sctx->first_free;
1931                 if (sctx->curr != -1) {
1932                         sctx->first_free = sctx->bios[sctx->curr]->next_free;
1933                         sctx->bios[sctx->curr]->next_free = -1;
1934                         sctx->bios[sctx->curr]->page_count = 0;
1935                         spin_unlock(&sctx->list_lock);
1936                 } else {
1937                         spin_unlock(&sctx->list_lock);
1938                         wait_event(sctx->list_wait, sctx->first_free != -1);
1939                 }
1940         }
1941         sbio = sctx->bios[sctx->curr];
1942         if (sbio->page_count == 0) {
1943                 struct bio *bio;
1944
1945                 sbio->physical = spage->physical;
1946                 sbio->logical = spage->logical;
1947                 sbio->dev = spage->dev;
1948                 bio = sbio->bio;
1949                 if (!bio) {
1950                         bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
1951                         if (!bio)
1952                                 return -ENOMEM;
1953                         sbio->bio = bio;
1954                 }
1955
1956                 bio->bi_private = sbio;
1957                 bio->bi_end_io = scrub_bio_end_io;
1958                 bio->bi_bdev = sbio->dev->bdev;
1959                 bio->bi_iter.bi_sector = sbio->physical >> 9;
1960                 sbio->err = 0;
1961         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1962                    spage->physical ||
1963                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1964                    spage->logical ||
1965                    sbio->dev != spage->dev) {
1966                 scrub_submit(sctx);
1967                 goto again;
1968         }
1969
1970         sbio->pagev[sbio->page_count] = spage;
1971         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1972         if (ret != PAGE_SIZE) {
1973                 if (sbio->page_count < 1) {
1974                         bio_put(sbio->bio);
1975                         sbio->bio = NULL;
1976                         return -EIO;
1977                 }
1978                 scrub_submit(sctx);
1979                 goto again;
1980         }
1981
1982         scrub_block_get(sblock); /* one for the page added to the bio */
1983         atomic_inc(&sblock->outstanding_pages);
1984         sbio->page_count++;
1985         if (sbio->page_count == sctx->pages_per_rd_bio)
1986                 scrub_submit(sctx);
1987
1988         return 0;
1989 }
1990
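/*
 * Split the range [logical, logical + len) into PAGE_SIZE sized scrub_pages
 * inside one scrub_block, copy the csum into each page when one was
 * supplied, and add every page to a read bio. With 'force' set, the bio is
 * submitted right away.
 */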
1991 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
1992                        u64 physical, struct btrfs_device *dev, u64 flags,
1993                        u64 gen, int mirror_num, u8 *csum, int force,
1994                        u64 physical_for_dev_replace)
1995 {
1996         struct scrub_block *sblock;
1997         int index;
1998
1999         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2000         if (!sblock) {
2001                 spin_lock(&sctx->stat_lock);
2002                 sctx->stat.malloc_errors++;
2003                 spin_unlock(&sctx->stat_lock);
2004                 return -ENOMEM;
2005         }
2006
2007         /* one ref inside this function, plus one for each page added to
2008          * a bio later on */
2009         atomic_set(&sblock->ref_count, 1);
2010         sblock->sctx = sctx;
2011         sblock->no_io_error_seen = 1;
2012
2013         for (index = 0; len > 0; index++) {
2014                 struct scrub_page *spage;
2015                 u64 l = min_t(u64, len, PAGE_SIZE);
2016
2017                 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2018                 if (!spage) {
2019 leave_nomem:
2020                         spin_lock(&sctx->stat_lock);
2021                         sctx->stat.malloc_errors++;
2022                         spin_unlock(&sctx->stat_lock);
2023                         scrub_block_put(sblock);
2024                         return -ENOMEM;
2025                 }
2026                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2027                 scrub_page_get(spage);
2028                 sblock->pagev[index] = spage;
2029                 spage->sblock = sblock;
2030                 spage->dev = dev;
2031                 spage->flags = flags;
2032                 spage->generation = gen;
2033                 spage->logical = logical;
2034                 spage->physical = physical;
2035                 spage->physical_for_dev_replace = physical_for_dev_replace;
2036                 spage->mirror_num = mirror_num;
2037                 if (csum) {
2038                         spage->have_csum = 1;
2039                         memcpy(spage->csum, csum, sctx->csum_size);
2040                 } else {
2041                         spage->have_csum = 0;
2042                 }
2043                 sblock->page_count++;
2044                 spage->page = alloc_page(GFP_NOFS);
2045                 if (!spage->page)
2046                         goto leave_nomem;
2047                 len -= l;
2048                 logical += l;
2049                 physical += l;
2050                 physical_for_dev_replace += l;
2051         }
2052
2053         WARN_ON(sblock->page_count == 0);
2054         for (index = 0; index < sblock->page_count; index++) {
2055                 struct scrub_page *spage = sblock->pagev[index];
2056                 int ret;
2057
2058                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2059                 if (ret) {
2060                         scrub_block_put(sblock);
2061                         return ret;
2062                 }
2063         }
2064
2065         if (force)
2066                 scrub_submit(sctx);
2067
2068         /* last ref frees, either here or in the last page's bio completion */
2069         scrub_block_put(sblock);
2070         return 0;
2071 }
2072
2073 static void scrub_bio_end_io(struct bio *bio, int err)
2074 {
2075         struct scrub_bio *sbio = bio->bi_private;
2076         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
2077
2078         sbio->err = err;
2079         sbio->bio = bio;
2080
2081         btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2082 }
2083
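/*
 * Read bio completion: mark all pages of a failed bio as errored, complete
 * every scrub_block whose last outstanding page has finished, put the
 * scrub_bio back on the free list, and in dev-replace mode flush the queued
 * target writes if a flush was requested.
 */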
2084 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2085 {
2086         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2087         struct scrub_ctx *sctx = sbio->sctx;
2088         int i;
2089
2090         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2091         if (sbio->err) {
2092                 for (i = 0; i < sbio->page_count; i++) {
2093                         struct scrub_page *spage = sbio->pagev[i];
2094
2095                         spage->io_error = 1;
2096                         spage->sblock->no_io_error_seen = 0;
2097                 }
2098         }
2099
2100         /* now complete the scrub_block items that have all pages completed */
2101         for (i = 0; i < sbio->page_count; i++) {
2102                 struct scrub_page *spage = sbio->pagev[i];
2103                 struct scrub_block *sblock = spage->sblock;
2104
2105                 if (atomic_dec_and_test(&sblock->outstanding_pages))
2106                         scrub_block_complete(sblock);
2107                 scrub_block_put(sblock);
2108         }
2109
2110         bio_put(sbio->bio);
2111         sbio->bio = NULL;
2112         spin_lock(&sctx->list_lock);
2113         sbio->next_free = sctx->first_free;
2114         sctx->first_free = sbio->index;
2115         spin_unlock(&sctx->list_lock);
2116
2117         if (sctx->is_dev_replace &&
2118             atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2119                 mutex_lock(&sctx->wr_ctx.wr_lock);
2120                 scrub_wr_submit(sctx);
2121                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2122         }
2123
2124         scrub_pending_bio_dec(sctx);
2125 }
2126
2127 static void scrub_block_complete(struct scrub_block *sblock)
2128 {
2129         if (!sblock->no_io_error_seen) {
2130                 scrub_handle_errored_block(sblock);
2131         } else {
2132                 /*
2133                  * in the dev-replace case: if the block has a checksum
2134                  * error, it is rewritten via the repair mechanism;
2135                  * otherwise it is written to the target device here.
2136                  */
2137                 if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace)
2138                         scrub_write_block_to_dev_replace(sblock);
2139         }
2140 }
2141
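/*
 * Look up the data checksum for 'logical' in sctx->csum_list. Sums that end
 * at or before 'logical' are dropped from the list; when a matching
 * btrfs_ordered_sum is found, the csum of the containing sector is copied
 * into 'csum' and 1 is returned, otherwise 0.
 */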
2142 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
2143                            u8 *csum)
2144 {
2145         struct btrfs_ordered_sum *sum = NULL;
2146         unsigned long index;
2147         unsigned long num_sectors;
2148
2149         while (!list_empty(&sctx->csum_list)) {
2150                 sum = list_first_entry(&sctx->csum_list,
2151                                        struct btrfs_ordered_sum, list);
2152                 if (sum->bytenr > logical)
2153                         return 0;
2154                 if (sum->bytenr + sum->len > logical)
2155                         break;
2156
2157                 ++sctx->stat.csum_discards;
2158                 list_del(&sum->list);
2159                 kfree(sum);
2160                 sum = NULL;
2161         }
2162         if (!sum)
2163                 return 0;
2164
2165         index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
2166         num_sectors = sum->len / sctx->sectorsize;
2167         memcpy(csum, sum->sums + index, sctx->csum_size);
2168         if (index == num_sectors - 1) {
2169                 list_del(&sum->list);
2170                 kfree(sum);
2171         }
2172         return 1;
2173 }
2174
2175 /* scrub extent tries to collect up to 64 kB for each bio */
2176 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2177                         u64 physical, struct btrfs_device *dev, u64 flags,
2178                         u64 gen, int mirror_num, u64 physical_for_dev_replace)
2179 {
2180         int ret;
2181         u8 csum[BTRFS_CSUM_SIZE];
2182         u32 blocksize;
2183
2184         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2185                 blocksize = sctx->sectorsize;
2186                 spin_lock(&sctx->stat_lock);
2187                 sctx->stat.data_extents_scrubbed++;
2188                 sctx->stat.data_bytes_scrubbed += len;
2189                 spin_unlock(&sctx->stat_lock);
2190         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2191                 blocksize = sctx->nodesize;
2192                 spin_lock(&sctx->stat_lock);
2193                 sctx->stat.tree_extents_scrubbed++;
2194                 sctx->stat.tree_bytes_scrubbed += len;
2195                 spin_unlock(&sctx->stat_lock);
2196         } else {
2197                 blocksize = sctx->sectorsize;
2198                 WARN_ON(1);
2199         }
2200
2201         while (len) {
2202                 u64 l = min_t(u64, len, blocksize);
2203                 int have_csum = 0;
2204
2205                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2206                         /* push csums to sbio */
2207                         have_csum = scrub_find_csum(sctx, logical, l, csum);
2208                         if (have_csum == 0)
2209                                 ++sctx->stat.no_csum;
2210                         if (sctx->is_dev_replace && !have_csum) {
2211                                 ret = copy_nocow_pages(sctx, logical, l,
2212                                                        mirror_num,
2213                                                       physical_for_dev_replace);
2214                                 goto behind_scrub_pages;
2215                         }
2216                 }
2217                 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2218                                   mirror_num, have_csum ? csum : NULL, 0,
2219                                   physical_for_dev_replace);
2220 behind_scrub_pages:
2221                 if (ret)
2222                         return ret;
2223                 len -= l;
2224                 logical += l;
2225                 physical += l;
2226                 physical_for_dev_replace += l;
2227         }
2228         return 0;
2229 }
2230
2231 /*
2232  * Given a physical address, this calculates its logical offset.
2233  * If this is a parity stripe, it returns the logical offset of the
2234  * left-most data stripe.
2235  *
2236  * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
2237  */
2238 static int get_raid56_logic_offset(u64 physical, int num,
2239                                    struct map_lookup *map, u64 *offset)
2240 {
2241         int i;
2242         int j = 0;
2243         u64 stripe_nr;
2244         u64 last_offset;
2245         int stripe_index;
2246         int rot;
2247
2248         last_offset = (physical - map->stripes[num].physical) *
2249                       nr_data_stripes(map);
2250         *offset = last_offset;
2251         for (i = 0; i < nr_data_stripes(map); i++) {
2252                 *offset = last_offset + i * map->stripe_len;
2253
2254                 stripe_nr = *offset;
2255                 do_div(stripe_nr, map->stripe_len);
2256                 do_div(stripe_nr, nr_data_stripes(map));
2257
2258                 /* Work out the disk rotation on this stripe-set */
2259                 rot = do_div(stripe_nr, map->num_stripes);
2260                 /* calculate which stripe this data is located on */
2261                 rot += i;
2262                 stripe_index = rot % map->num_stripes;
2263                 if (stripe_index == num)
2264                         return 0;
2265                 if (stripe_index < num)
2266                         j++;
2267         }
2268         *offset = last_offset + j * map->stripe_len;
2269         return 1;
2270 }
2271
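/*
 * Scrub one device stripe of a chunk: work out the logical offset and the
 * per-stripe increment for the RAID profile, trigger readahead on the
 * extent and csum trees, then walk the extent items that fall into each
 * stripe, trim them to the stripe and scrub them via scrub_extent().
 * Cancel and pause requests are honoured between stripes.
 */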
2272 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2273                                            struct map_lookup *map,
2274                                            struct btrfs_device *scrub_dev,
2275                                            int num, u64 base, u64 length,
2276                                            int is_dev_replace)
2277 {
2278         struct btrfs_path *path;
2279         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2280         struct btrfs_root *root = fs_info->extent_root;
2281         struct btrfs_root *csum_root = fs_info->csum_root;
2282         struct btrfs_extent_item *extent;
2283         struct blk_plug plug;
2284         u64 flags;
2285         int ret;
2286         int slot;
2287         u64 nstripes;
2288         struct extent_buffer *l;
2289         struct btrfs_key key;
2290         u64 physical;
2291         u64 logical;
2292         u64 logic_end;
2293         u64 physical_end;
2294         u64 generation;
2295         int mirror_num;
2296         struct reada_control *reada1;
2297         struct reada_control *reada2;
2298         struct btrfs_key key_start;
2299         struct btrfs_key key_end;
2300         u64 increment = map->stripe_len;
2301         u64 offset;
2302         u64 extent_logical;
2303         u64 extent_physical;
2304         u64 extent_len;
2305         struct btrfs_device *extent_dev;
2306         int extent_mirror_num;
2307         int stop_loop = 0;
2308
2309         nstripes = length;
2310         physical = map->stripes[num].physical;
2311         offset = 0;
2312         do_div(nstripes, map->stripe_len);
2313         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
2314                 offset = map->stripe_len * num;
2315                 increment = map->stripe_len * map->num_stripes;
2316                 mirror_num = 1;
2317         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2318                 int factor = map->num_stripes / map->sub_stripes;
2319                 offset = map->stripe_len * (num / map->sub_stripes);
2320                 increment = map->stripe_len * factor;
2321                 mirror_num = num % map->sub_stripes + 1;
2322         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
2323                 increment = map->stripe_len;
2324                 mirror_num = num % map->num_stripes + 1;
2325         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2326                 increment = map->stripe_len;
2327                 mirror_num = num % map->num_stripes + 1;
2328         } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2329                                 BTRFS_BLOCK_GROUP_RAID6)) {
2330                 get_raid56_logic_offset(physical, num, map, &offset);
2331                 increment = map->stripe_len * nr_data_stripes(map);
2332                 mirror_num = 1;
2333         } else {
2334                 increment = map->stripe_len;
2335                 mirror_num = 1;
2336         }
2337
2338         path = btrfs_alloc_path();
2339         if (!path)
2340                 return -ENOMEM;
2341
2342         /*
2343          * work on the commit root. The related disk blocks are static as
2344          * long as COW is applied. This means it is safe to rewrite them
2345          * to repair disk errors without any race conditions.
2346          */
2347         path->search_commit_root = 1;
2348         path->skip_locking = 1;
2349
2350         /*
2351          * trigger the readahead for the extent tree and the csum tree and
2352          * wait for completion. During readahead, the scrub is officially
2353          * paused in order to not hold off transaction commits.
2354          */
2355         logical = base + offset;
2356         physical_end = physical + nstripes * map->stripe_len;
2357         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2358                          BTRFS_BLOCK_GROUP_RAID6)) {
2359                 get_raid56_logic_offset(physical_end, num,
2360                                         map, &logic_end);
2361                 logic_end += base;
2362         } else {
2363                 logic_end = logical + increment * nstripes;
2364         }
2365         wait_event(sctx->list_wait,
2366                    atomic_read(&sctx->bios_in_flight) == 0);
2367         scrub_blocked_if_needed(fs_info);
2368
2369         /* FIXME it might be better to start readahead at commit root */
2370         key_start.objectid = logical;
2371         key_start.type = BTRFS_EXTENT_ITEM_KEY;
2372         key_start.offset = (u64)0;
2373         key_end.objectid = logic_end;
2374         key_end.type = BTRFS_METADATA_ITEM_KEY;
2375         key_end.offset = (u64)-1;
2376         reada1 = btrfs_reada_add(root, &key_start, &key_end);
2377
2378         key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
2379         key_start.type = BTRFS_EXTENT_CSUM_KEY;
2380         key_start.offset = logical;
2381         key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
2382         key_end.type = BTRFS_EXTENT_CSUM_KEY;
2383         key_end.offset = logic_end;
2384         reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
2385
2386         if (!IS_ERR(reada1))
2387                 btrfs_reada_wait(reada1);
2388         if (!IS_ERR(reada2))
2389                 btrfs_reada_wait(reada2);
2390
2391
2392         /*
2393          * collect all data csums for the stripe to avoid seeking during
2394          * the scrub. This might currently (crc32) end up being about 1MB.
2395          */
2396         blk_start_plug(&plug);
2397
2398         /*
2399          * now find all extents for each stripe and scrub them
2400          */
2401         ret = 0;
2402         while (physical < physical_end) {
2403                 /* for raid56, we skip parity stripe */
2404                 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2405                                 BTRFS_BLOCK_GROUP_RAID6)) {
2406                         ret = get_raid56_logic_offset(physical, num,
2407                                         map, &logical);
2408                         logical += base;
2409                         if (ret)
2410                                 goto skip;
2411                 }
2412                 /*
2413                  * canceled?
2414                  */
2415                 if (atomic_read(&fs_info->scrub_cancel_req) ||
2416                     atomic_read(&sctx->cancel_req)) {
2417                         ret = -ECANCELED;
2418                         goto out;
2419                 }
2420                 /*
2421                  * check to see if we have to pause
2422                  */
2423                 if (atomic_read(&fs_info->scrub_pause_req)) {
2424                         /* push queued extents */
2425                         atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
2426                         scrub_submit(sctx);
2427                         mutex_lock(&sctx->wr_ctx.wr_lock);
2428                         scrub_wr_submit(sctx);
2429                         mutex_unlock(&sctx->wr_ctx.wr_lock);
2430                         wait_event(sctx->list_wait,
2431                                    atomic_read(&sctx->bios_in_flight) == 0);
2432                         atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
2433                         scrub_blocked_if_needed(fs_info);
2434                 }
2435
2436                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2437                         key.type = BTRFS_METADATA_ITEM_KEY;
2438                 else
2439                         key.type = BTRFS_EXTENT_ITEM_KEY;
2440                 key.objectid = logical;
2441                 key.offset = (u64)-1;
2442
2443                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2444                 if (ret < 0)
2445                         goto out;
2446
2447                 if (ret > 0) {
2448                         ret = btrfs_previous_extent_item(root, path, 0);
2449                         if (ret < 0)
2450                                 goto out;
2451                         if (ret > 0) {
2452                                 /* there's no smaller item, so stick with the
2453                                  * larger one */
2454                                 btrfs_release_path(path);
2455                                 ret = btrfs_search_slot(NULL, root, &key,
2456                                                         path, 0, 0);
2457                                 if (ret < 0)
2458                                         goto out;
2459                         }
2460                 }
2461
2462                 stop_loop = 0;
2463                 while (1) {
2464                         u64 bytes;
2465
2466                         l = path->nodes[0];
2467                         slot = path->slots[0];
2468                         if (slot >= btrfs_header_nritems(l)) {
2469                                 ret = btrfs_next_leaf(root, path);
2470                                 if (ret == 0)
2471                                         continue;
2472                                 if (ret < 0)
2473                                         goto out;
2474
2475                                 stop_loop = 1;
2476                                 break;
2477                         }
2478                         btrfs_item_key_to_cpu(l, &key, slot);
2479
2480                         if (key.type == BTRFS_METADATA_ITEM_KEY)
2481                                 bytes = root->nodesize;
2482                         else
2483                                 bytes = key.offset;
2484
2485                         if (key.objectid + bytes <= logical)
2486                                 goto next;
2487
2488                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2489                             key.type != BTRFS_METADATA_ITEM_KEY)
2490                                 goto next;
2491
2492                         if (key.objectid >= logical + map->stripe_len) {
2493                                 /* out of this device extent */
2494                                 if (key.objectid >= logic_end)
2495                                         stop_loop = 1;
2496                                 break;
2497                         }
2498
2499                         extent = btrfs_item_ptr(l, slot,
2500                                                 struct btrfs_extent_item);
2501                         flags = btrfs_extent_flags(l, extent);
2502                         generation = btrfs_extent_generation(l, extent);
2503
2504                         if (key.objectid < logical &&
2505                             (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
2506                                 btrfs_err(fs_info,
2507                                            "scrub: tree block %llu spanning "
2508                                            "stripes, ignored. logical=%llu",
2509                                        key.objectid, logical);
2510                                 goto next;
2511                         }
2512
2513 again:
2514                         extent_logical = key.objectid;
2515                         extent_len = bytes;
2516
2517                         /*
2518                          * trim extent to this stripe
2519                          */
2520                         if (extent_logical < logical) {
2521                                 extent_len -= logical - extent_logical;
2522                                 extent_logical = logical;
2523                         }
2524                         if (extent_logical + extent_len >
2525                             logical + map->stripe_len) {
2526                                 extent_len = logical + map->stripe_len -
2527                                              extent_logical;
2528                         }
2529
2530                         extent_physical = extent_logical - logical + physical;
2531                         extent_dev = scrub_dev;
2532                         extent_mirror_num = mirror_num;
2533                         if (is_dev_replace)
2534                                 scrub_remap_extent(fs_info, extent_logical,
2535                                                    extent_len, &extent_physical,
2536                                                    &extent_dev,
2537                                                    &extent_mirror_num);
2538
2539                         ret = btrfs_lookup_csums_range(csum_root, logical,
2540                                                 logical + map->stripe_len - 1,
2541                                                 &sctx->csum_list, 1);
2542                         if (ret)
2543                                 goto out;
2544
2545                         ret = scrub_extent(sctx, extent_logical, extent_len,
2546                                            extent_physical, extent_dev, flags,
2547                                            generation, extent_mirror_num,
2548                                            extent_logical - logical + physical);
2549                         if (ret)
2550                                 goto out;
2551
2552                         scrub_free_csums(sctx);
2553                         if (extent_logical + extent_len <
2554                             key.objectid + bytes) {
2555                                 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2556                                         BTRFS_BLOCK_GROUP_RAID6)) {
2557                                         /*
2558                                          * loop until we find the next data
2559                                          * stripe or we have finished all stripes.
2560                                          */
2561                                         do {
2562                                                 physical += map->stripe_len;
2563                                                 ret = get_raid56_logic_offset(
2564                                                                 physical, num,
2565                                                                 map, &logical);
2566                                                 logical += base;
2567                                         } while (physical < physical_end && ret);
2568                                 } else {
2569                                         physical += map->stripe_len;
2570                                         logical += increment;
2571                                 }
2572                                 if (logical < key.objectid + bytes) {
2573                                         cond_resched();
2574                                         goto again;
2575                                 }
2576
2577                                 if (physical >= physical_end) {
2578                                         stop_loop = 1;
2579                                         break;
2580                                 }
2581                         }
2582 next:
2583                         path->slots[0]++;
2584                 }
2585                 btrfs_release_path(path);
2586 skip:
2587                 logical += increment;
2588                 physical += map->stripe_len;
2589                 spin_lock(&sctx->stat_lock);
2590                 if (stop_loop)
2591                         sctx->stat.last_physical = map->stripes[num].physical +
2592                                                    length;
2593                 else
2594                         sctx->stat.last_physical = physical;
2595                 spin_unlock(&sctx->stat_lock);
2596                 if (stop_loop)
2597                         break;
2598         }
2599 out:
2600         /* push queued extents */
2601         scrub_submit(sctx);
2602         mutex_lock(&sctx->wr_ctx.wr_lock);
2603         scrub_wr_submit(sctx);
2604         mutex_unlock(&sctx->wr_ctx.wr_lock);
2605
2606         blk_finish_plug(&plug);
2607         btrfs_free_path(path);
2608         return ret < 0 ? ret : 0;
2609 }
2610
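/*
 * Look up the chunk mapping for 'chunk_offset' and scrub the stripe(s) of
 * this chunk that are stored at 'dev_offset' on 'scrub_dev'.
 */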
2611 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2612                                           struct btrfs_device *scrub_dev,
2613                                           u64 chunk_tree, u64 chunk_objectid,
2614                                           u64 chunk_offset, u64 length,
2615                                           u64 dev_offset, int is_dev_replace)
2616 {
2617         struct btrfs_mapping_tree *map_tree =
2618                 &sctx->dev_root->fs_info->mapping_tree;
2619         struct map_lookup *map;
2620         struct extent_map *em;
2621         int i;
2622         int ret = 0;
2623
2624         read_lock(&map_tree->map_tree.lock);
2625         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2626         read_unlock(&map_tree->map_tree.lock);
2627
2628         if (!em)
2629                 return -EINVAL;
2630
2631         map = (struct map_lookup *)em->bdev;
2632         if (em->start != chunk_offset)
2633                 goto out;
2634
2635         if (em->len < length)
2636                 goto out;
2637
2638         for (i = 0; i < map->num_stripes; ++i) {
2639                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2640                     map->stripes[i].physical == dev_offset) {
2641                         ret = scrub_stripe(sctx, map, scrub_dev, i,
2642                                            chunk_offset, length,
2643                                            is_dev_replace);
2644                         if (ret)
2645                                 goto out;
2646                 }
2647         }
2648 out:
2649         free_extent_map(em);
2650
2651         return ret;
2652 }
2653
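     /*
      * Walk all dev extents of @scrub_dev that overlap [start, end) and scrub
      * the chunk each of them belongs to. After each chunk, all pending read
      * and write bios are flushed and waited for, and the scrub pauses if it
      * was asked to (see btrfs_scrub_pause()). The dev replace cursor is
      * advanced as chunks complete.
      */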
2654 static noinline_for_stack
2655 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2656                            struct btrfs_device *scrub_dev, u64 start, u64 end,
2657                            int is_dev_replace)
2658 {
2659         struct btrfs_dev_extent *dev_extent = NULL;
2660         struct btrfs_path *path;
2661         struct btrfs_root *root = sctx->dev_root;
2662         struct btrfs_fs_info *fs_info = root->fs_info;
2663         u64 length;
2664         u64 chunk_tree;
2665         u64 chunk_objectid;
2666         u64 chunk_offset;
2667         int ret;
2668         int slot;
2669         struct extent_buffer *l;
2670         struct btrfs_key key;
2671         struct btrfs_key found_key;
2672         struct btrfs_block_group_cache *cache;
2673         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2674
2675         path = btrfs_alloc_path();
2676         if (!path)
2677                 return -ENOMEM;
2678
2679         path->reada = 2;
2680         path->search_commit_root = 1;
2681         path->skip_locking = 1;
2682
2683         key.objectid = scrub_dev->devid;
2684         key.offset = 0ull;
2685         key.type = BTRFS_DEV_EXTENT_KEY;
2686
2687         while (1) {
2688                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2689                 if (ret < 0)
2690                         break;
2691                 if (ret > 0) {
2692                         if (path->slots[0] >=
2693                             btrfs_header_nritems(path->nodes[0])) {
2694                                 ret = btrfs_next_leaf(root, path);
2695                                 if (ret)
2696                                         break;
2697                         }
2698                 }
2699
2700                 l = path->nodes[0];
2701                 slot = path->slots[0];
2702
2703                 btrfs_item_key_to_cpu(l, &found_key, slot);
2704
2705                 if (found_key.objectid != scrub_dev->devid)
2706                         break;
2707
2708                 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
2709                         break;
2710
2711                 if (found_key.offset >= end)
2712                         break;
2713
2714                 if (found_key.offset < key.offset)
2715                         break;
2716
2717                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2718                 length = btrfs_dev_extent_length(l, dev_extent);
2719
2720                 if (found_key.offset + length <= start)
2721                         goto skip;
2722
2723                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2724                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2725                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2726
2727                 /*
2728                  * get a reference on the corresponding block group to prevent
2729                  * the chunk from going away while we scrub it
2730                  */
2731                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2732
2733                 /* some chunks are removed but not committed to disk yet,
2734                  * continue scrubbing */
2735                 if (!cache)
2736                         goto skip;
2737
2738                 dev_replace->cursor_right = found_key.offset + length;
2739                 dev_replace->cursor_left = found_key.offset;
2740                 dev_replace->item_needs_writeback = 1;
2741                 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
2742                                   chunk_offset, length, found_key.offset,
2743                                   is_dev_replace);
2744
2745                 /*
2746                  * Flush and submit all pending read and write bios, then
2747                  * wait for them.
2748                  * Note that in the dev replace case, a read request causes
2749                  * write requests that are submitted in the read completion
2750                  * worker. Therefore in the current situation, it is required
2751                  * that all write requests are flushed, so that all read and
2752                  * write requests are really completed when bios_in_flight
2753                  * changes to 0.
2754                  */
2755                 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
2756                 scrub_submit(sctx);
2757                 mutex_lock(&sctx->wr_ctx.wr_lock);
2758                 scrub_wr_submit(sctx);
2759                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2760
2761                 wait_event(sctx->list_wait,
2762                            atomic_read(&sctx->bios_in_flight) == 0);
2763                 atomic_inc(&fs_info->scrubs_paused);
2764                 wake_up(&fs_info->scrub_pause_wait);
2765
2766                 /*
2767                  * This must be done before we decrease @scrub_paused.
2768                  * Make sure we don't block transaction commit while
2769                  * we are waiting for pending workers to finish.
2770                  */
2771                 wait_event(sctx->list_wait,
2772                            atomic_read(&sctx->workers_pending) == 0);
2773                 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
2774
2775                 mutex_lock(&fs_info->scrub_lock);
2776                 __scrub_blocked_if_needed(fs_info);
2777                 atomic_dec(&fs_info->scrubs_paused);
2778                 mutex_unlock(&fs_info->scrub_lock);
2779                 wake_up(&fs_info->scrub_pause_wait);
2780
2781                 btrfs_put_block_group(cache);
2782                 if (ret)
2783                         break;
2784                 if (is_dev_replace &&
2785                     atomic64_read(&dev_replace->num_write_errors) > 0) {
2786                         ret = -EIO;
2787                         break;
2788                 }
2789                 if (sctx->stat.malloc_errors > 0) {
2790                         ret = -ENOMEM;
2791                         break;
2792                 }
2793
2794                 dev_replace->cursor_left = dev_replace->cursor_right;
2795                 dev_replace->item_needs_writeback = 1;
2796 skip:
2797                 key.offset = found_key.offset + length;
2798                 btrfs_release_path(path);
2799         }
2800
2801         btrfs_free_path(path);
2802
2803         /*
2804          * ret can still be 1 from search_slot or next_leaf,
2805          * that's not an error
2806          */
2807         return ret < 0 ? ret : 0;
2808 }
2809
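     /*
      * Scrub all super block mirrors that fit on @scrub_dev. A seed device
      * that does not belong to this filesystem's fs_devices is checked
      * against its own generation, everything else against the last
      * committed transaction.
      */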
2810 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2811                                            struct btrfs_device *scrub_dev)
2812 {
2813         int     i;
2814         u64     bytenr;
2815         u64     gen;
2816         int     ret;
2817         struct btrfs_root *root = sctx->dev_root;
2818
2819         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
2820                 return -EIO;
2821
2822         /* Seed devices of a new filesystem have their own generation. */
2823         if (scrub_dev->fs_devices != root->fs_info->fs_devices)
2824                 gen = scrub_dev->generation;
2825         else
2826                 gen = root->fs_info->last_trans_committed;
2827
2828         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2829                 bytenr = btrfs_sb_offset(i);
2830                 if (bytenr + BTRFS_SUPER_INFO_SIZE >
2831                     scrub_dev->commit_total_bytes)
2832                         break;
2833
2834                 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
2835                                   scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
2836                                   NULL, 1, bytenr);
2837                 if (ret)
2838                         return ret;
2839         }
2840         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
2841
2842         return 0;
2843 }
2844
2845 /*
2846  * get a reference count on fs_info->scrub_workers. Start the workers if necessary.
2847  */
2848 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
2849                                                 int is_dev_replace)
2850 {
2851         int ret = 0;
2852         int flags = WQ_FREEZABLE | WQ_UNBOUND;
2853         int max_active = fs_info->thread_pool_size;
2854
2855         if (fs_info->scrub_workers_refcnt == 0) {
2856                 if (is_dev_replace)
2857                         fs_info->scrub_workers =
2858                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
2859                                                       1, 4);
2860                 else
2861                         fs_info->scrub_workers =
2862                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
2863                                                       max_active, 4);
2864                 if (!fs_info->scrub_workers) {
2865                         ret = -ENOMEM;
2866                         goto out;
2867                 }
2868                 fs_info->scrub_wr_completion_workers =
2869                         btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
2870                                               max_active, 2);
2871                 if (!fs_info->scrub_wr_completion_workers) {
2872                         ret = -ENOMEM;
2873                         goto out;
2874                 }
2875                 fs_info->scrub_nocow_workers =
2876                         btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
2877                 if (!fs_info->scrub_nocow_workers) {
2878                         ret = -ENOMEM;
2879                         goto out;
2880                 }
2881         }
2882         ++fs_info->scrub_workers_refcnt;
2883 out:
2884         return ret;
2885 }
2886
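     /* drop a reference on the scrub workqueues, destroy them on the last put */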
2887 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
2888 {
2889         if (--fs_info->scrub_workers_refcnt == 0) {
2890                 btrfs_destroy_workqueue(fs_info->scrub_workers);
2891                 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
2892                 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
2893         }
2894         WARN_ON(fs_info->scrub_workers_refcnt < 0);
2895 }
2896
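     /*
      * Start a scrub on a single device; used both for a plain scrub and for
      * device replace (@is_dev_replace). Validates the node/sector size
      * assumptions the scrub code relies on, takes a reference on the scrub
      * workers, sets up a scrub context for @dev and then scrubs the super
      * blocks (skipped for dev replace) followed by all chunks in
      * [start, end). Returns -EINPROGRESS if a scrub is already running on
      * @dev or a device replace is in progress, and -EROFS if a read-write
      * scrub is requested on a device that is not writeable.
      */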
2897 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2898                     u64 end, struct btrfs_scrub_progress *progress,
2899                     int readonly, int is_dev_replace)
2900 {
2901         struct scrub_ctx *sctx;
2902         int ret;
2903         struct btrfs_device *dev;
2904         struct rcu_string *name;
2905
2906         if (btrfs_fs_closing(fs_info))
2907                 return -EINVAL;
2908
2909         if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
2910                 /*
2911                  * In this case scrub is unable to calculate the checksum
2912                  * the way it is currently implemented. Do not handle this
2913                  * situation at all because it won't ever happen.
2914                  */
2915                 btrfs_err(fs_info,
2916                            "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
2917                        fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
2918                 return -EINVAL;
2919         }
2920
2921         if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
2922                 /* not supported for data w/o checksums */
2923                 btrfs_err(fs_info,
2924                            "scrub: size assumption sectorsize != PAGE_SIZE "
2925                            "(%d != %lu) fails",
2926                        fs_info->chunk_root->sectorsize, PAGE_SIZE);
2927                 return -EINVAL;
2928         }
2929
2930         if (fs_info->chunk_root->nodesize >
2931             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
2932             fs_info->chunk_root->sectorsize >
2933             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
2934                 /*
2935                  * this would exhaust the array bounds of the pagev member
2936                  * in struct scrub_block
2937                  */
2938                 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
2939                            "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
2940                        fs_info->chunk_root->nodesize,
2941                        SCRUB_MAX_PAGES_PER_BLOCK,
2942                        fs_info->chunk_root->sectorsize,
2943                        SCRUB_MAX_PAGES_PER_BLOCK);
2944                 return -EINVAL;
2945         }
2946
2947
2948         mutex_lock(&fs_info->fs_devices->device_list_mutex);
2949         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
2950         if (!dev || (dev->missing && !is_dev_replace)) {
2951                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2952                 return -ENODEV;
2953         }
2954
2955         if (!is_dev_replace && !readonly && !dev->writeable) {
2956                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2957                 rcu_read_lock();
2958                 name = rcu_dereference(dev->name);
2959                 btrfs_err(fs_info, "scrub: device %s is not writable",
2960                           name->str);
2961                 rcu_read_unlock();
2962                 return -EROFS;
2963         }
2964
2965         mutex_lock(&fs_info->scrub_lock);
2966         if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
2967                 mutex_unlock(&fs_info->scrub_lock);
2968                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2969                 return -EIO;
2970         }
2971
2972         btrfs_dev_replace_lock(&fs_info->dev_replace);
2973         if (dev->scrub_device ||
2974             (!is_dev_replace &&
2975              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2976                 btrfs_dev_replace_unlock(&fs_info->dev_replace);
2977                 mutex_unlock(&fs_info->scrub_lock);
2978                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2979                 return -EINPROGRESS;
2980         }
2981         btrfs_dev_replace_unlock(&fs_info->dev_replace);
2982
2983         ret = scrub_workers_get(fs_info, is_dev_replace);
2984         if (ret) {
2985                 mutex_unlock(&fs_info->scrub_lock);
2986                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2987                 return ret;
2988         }
2989
2990         sctx = scrub_setup_ctx(dev, is_dev_replace);
2991         if (IS_ERR(sctx)) {
2992                 mutex_unlock(&fs_info->scrub_lock);
2993                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2994                 scrub_workers_put(fs_info);
2995                 return PTR_ERR(sctx);
2996         }
2997         sctx->readonly = readonly;
2998         dev->scrub_device = sctx;
2999         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3000
3001         /*
3002          * By checking @scrub_pause_req here, we can avoid a
3003          * race between committing a transaction and scrubbing.
3004          */
3005         __scrub_blocked_if_needed(fs_info);
3006         atomic_inc(&fs_info->scrubs_running);
3007         mutex_unlock(&fs_info->scrub_lock);
3008
3009         if (!is_dev_replace) {
3010                 /*
3011                  * by holding the device list mutex, we can
3012                  * kick off writing the super block in log tree sync.
3013                  */
3014                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3015                 ret = scrub_supers(sctx, dev);
3016                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3017         }
3018
3019         if (!ret)
3020                 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3021                                              is_dev_replace);
3022
3023         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3024         atomic_dec(&fs_info->scrubs_running);
3025         wake_up(&fs_info->scrub_pause_wait);
3026
3027         wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3028
3029         if (progress)
3030                 memcpy(progress, &sctx->stat, sizeof(*progress));
3031
3032         mutex_lock(&fs_info->scrub_lock);
3033         dev->scrub_device = NULL;
3034         scrub_workers_put(fs_info);
3035         mutex_unlock(&fs_info->scrub_lock);
3036
3037         scrub_free_ctx(sctx);
3038
3039         return ret;
3040 }
3041
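     /*
      * Ask all running scrubs to pause and wait until each of them has
      * reached its pause point, i.e. scrubs_paused equals scrubs_running.
      */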
3042 void btrfs_scrub_pause(struct btrfs_root *root)
3043 {
3044         struct btrfs_fs_info *fs_info = root->fs_info;
3045
3046         mutex_lock(&fs_info->scrub_lock);
3047         atomic_inc(&fs_info->scrub_pause_req);
3048         while (atomic_read(&fs_info->scrubs_paused) !=
3049                atomic_read(&fs_info->scrubs_running)) {
3050                 mutex_unlock(&fs_info->scrub_lock);
3051                 wait_event(fs_info->scrub_pause_wait,
3052                            atomic_read(&fs_info->scrubs_paused) ==
3053                            atomic_read(&fs_info->scrubs_running));
3054                 mutex_lock(&fs_info->scrub_lock);
3055         }
3056         mutex_unlock(&fs_info->scrub_lock);
3057 }
3058
3059 void btrfs_scrub_continue(struct btrfs_root *root)
3060 {
3061         struct btrfs_fs_info *fs_info = root->fs_info;
3062
3063         atomic_dec(&fs_info->scrub_pause_req);
3064         wake_up(&fs_info->scrub_pause_wait);
3065 }
3066
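     /*
      * Cancel all scrubs running on this filesystem and wait until they have
      * exited. Returns -ENOTCONN if no scrub is running.
      */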
3067 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3068 {
3069         mutex_lock(&fs_info->scrub_lock);
3070         if (!atomic_read(&fs_info->scrubs_running)) {
3071                 mutex_unlock(&fs_info->scrub_lock);
3072                 return -ENOTCONN;
3073         }
3074
3075         atomic_inc(&fs_info->scrub_cancel_req);
3076         while (atomic_read(&fs_info->scrubs_running)) {
3077                 mutex_unlock(&fs_info->scrub_lock);
3078                 wait_event(fs_info->scrub_pause_wait,
3079                            atomic_read(&fs_info->scrubs_running) == 0);
3080                 mutex_lock(&fs_info->scrub_lock);
3081         }
3082         atomic_dec(&fs_info->scrub_cancel_req);
3083         mutex_unlock(&fs_info->scrub_lock);
3084
3085         return 0;
3086 }
3087
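     /* cancel the scrub running on @dev, if any, and wait until it has exited */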
3088 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3089                            struct btrfs_device *dev)
3090 {
3091         struct scrub_ctx *sctx;
3092
3093         mutex_lock(&fs_info->scrub_lock);
3094         sctx = dev->scrub_device;
3095         if (!sctx) {
3096                 mutex_unlock(&fs_info->scrub_lock);
3097                 return -ENOTCONN;
3098         }
3099         atomic_inc(&sctx->cancel_req);
3100         while (dev->scrub_device) {
3101                 mutex_unlock(&fs_info->scrub_lock);
3102                 wait_event(fs_info->scrub_pause_wait,
3103                            dev->scrub_device == NULL);
3104                 mutex_lock(&fs_info->scrub_lock);
3105         }
3106         mutex_unlock(&fs_info->scrub_lock);
3107
3108         return 0;
3109 }
3110
3111 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
3112                          struct btrfs_scrub_progress *progress)
3113 {
3114         struct btrfs_device *dev;
3115         struct scrub_ctx *sctx = NULL;
3116
3117         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3118         dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
3119         if (dev)
3120                 sctx = dev->scrub_device;
3121         if (sctx)
3122                 memcpy(progress, &sctx->stat, sizeof(*progress));
3123         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3124
3125         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3126 }
3127
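     /*
      * Map @extent_logical to the physical offset, device and mirror number
      * of its first stripe. If the block cannot be mapped (or the stripe has
      * no bdev), the out parameters are left untouched.
      */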
3128 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
3129                                u64 extent_logical, u64 extent_len,
3130                                u64 *extent_physical,
3131                                struct btrfs_device **extent_dev,
3132                                int *extent_mirror_num)
3133 {
3134         u64 mapped_length;
3135         struct btrfs_bio *bbio = NULL;
3136         int ret;
3137
3138         mapped_length = extent_len;
3139         ret = btrfs_map_block(fs_info, READ, extent_logical,
3140                               &mapped_length, &bbio, 0);
3141         if (ret || !bbio || mapped_length < extent_len ||
3142             !bbio->stripes[0].dev->bdev) {
3143                 kfree(bbio);
3144                 return;
3145         }
3146
3147         *extent_physical = bbio->stripes[0].physical;
3148         *extent_mirror_num = bbio->mirror_num;
3149         *extent_dev = bbio->stripes[0].dev;
3150         kfree(bbio);
3151 }
3152
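     /*
      * Set up the write context used to write to the dev replace target
      * device @dev. For a plain scrub (!is_dev_replace) only the lock is
      * initialized and no target device is set.
      */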
3153 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
3154                               struct scrub_wr_ctx *wr_ctx,
3155                               struct btrfs_fs_info *fs_info,
3156                               struct btrfs_device *dev,
3157                               int is_dev_replace)
3158 {
3159         WARN_ON(wr_ctx->wr_curr_bio != NULL);
3160
3161         mutex_init(&wr_ctx->wr_lock);
3162         wr_ctx->wr_curr_bio = NULL;
3163         if (!is_dev_replace)
3164                 return 0;
3165
3166         WARN_ON(!dev->bdev);
3167         wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
3168                                          bio_get_nr_vecs(dev->bdev));
3169         wr_ctx->tgtdev = dev;
3170         atomic_set(&wr_ctx->flush_all_writes, 0);
3171         return 0;
3172 }
3173
3174 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
3175 {
3176         mutex_lock(&wr_ctx->wr_lock);
3177         kfree(wr_ctx->wr_curr_bio);
3178         wr_ctx->wr_curr_bio = NULL;
3179         mutex_unlock(&wr_ctx->wr_lock);
3180 }
3181
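     /*
      * Queue a worker that copies the content of a nocow extent at @logical
      * to @physical_for_dev_replace on the dev replace target, going through
      * the page cache of every inode that references the extent (see
      * copy_nocow_pages_worker() below).
      */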
3182 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
3183                             int mirror_num, u64 physical_for_dev_replace)
3184 {
3185         struct scrub_copy_nocow_ctx *nocow_ctx;
3186         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3187
3188         nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
3189         if (!nocow_ctx) {
3190                 spin_lock(&sctx->stat_lock);
3191                 sctx->stat.malloc_errors++;
3192                 spin_unlock(&sctx->stat_lock);
3193                 return -ENOMEM;
3194         }
3195
3196         scrub_pending_trans_workers_inc(sctx);
3197
3198         nocow_ctx->sctx = sctx;
3199         nocow_ctx->logical = logical;
3200         nocow_ctx->len = len;
3201         nocow_ctx->mirror_num = mirror_num;
3202         nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
3203         btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
3204                         copy_nocow_pages_worker, NULL, NULL);
3205         INIT_LIST_HEAD(&nocow_ctx->inodes);
3206         btrfs_queue_work(fs_info->scrub_nocow_workers,
3207                          &nocow_ctx->work);
3208
3209         return 0;
3210 }
3211
3212 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
3213 {
3214         struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
3215         struct scrub_nocow_inode *nocow_inode;
3216
3217         nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
3218         if (!nocow_inode)
3219                 return -ENOMEM;
3220         nocow_inode->inum = inum;
3221         nocow_inode->offset = offset;
3222         nocow_inode->root = root;
3223         list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
3224         return 0;
3225 }
3226
3227 #define COPY_COMPLETE 1
3228
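     /*
      * Worker for copy_nocow_pages(): resolve the inodes that reference the
      * logical range and copy their (possibly freshly read) pages to the dev
      * replace target. Set-up failures (no transaction, no path, or inode
      * resolution errors) are accounted as uncorrectable read errors in the
      * dev replace statistics.
      */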
3229 static void copy_nocow_pages_worker(struct btrfs_work *work)
3230 {
3231         struct scrub_copy_nocow_ctx *nocow_ctx =
3232                 container_of(work, struct scrub_copy_nocow_ctx, work);
3233         struct scrub_ctx *sctx = nocow_ctx->sctx;
3234         u64 logical = nocow_ctx->logical;
3235         u64 len = nocow_ctx->len;
3236         int mirror_num = nocow_ctx->mirror_num;
3237         u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3238         int ret;
3239         struct btrfs_trans_handle *trans = NULL;
3240         struct btrfs_fs_info *fs_info;
3241         struct btrfs_path *path;
3242         struct btrfs_root *root;
3243         int not_written = 0;
3244
3245         fs_info = sctx->dev_root->fs_info;
3246         root = fs_info->extent_root;
3247
3248         path = btrfs_alloc_path();
3249         if (!path) {
3250                 spin_lock(&sctx->stat_lock);
3251                 sctx->stat.malloc_errors++;
3252                 spin_unlock(&sctx->stat_lock);
3253                 not_written = 1;
3254                 goto out;
3255         }
3256
3257         trans = btrfs_join_transaction(root);
3258         if (IS_ERR(trans)) {
3259                 not_written = 1;
3260                 goto out;
3261         }
3262
3263         ret = iterate_inodes_from_logical(logical, fs_info, path,
3264                                           record_inode_for_nocow, nocow_ctx);
3265         if (ret != 0 && ret != -ENOENT) {
3266                 btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
3267                         "phys %llu, len %llu, mir %u, ret %d",
3268                         logical, physical_for_dev_replace, len, mirror_num,
3269                         ret);
3270                 not_written = 1;
3271                 goto out;
3272         }
3273
3274         btrfs_end_transaction(trans, root);
3275         trans = NULL;
3276         while (!list_empty(&nocow_ctx->inodes)) {
3277                 struct scrub_nocow_inode *entry;
3278                 entry = list_first_entry(&nocow_ctx->inodes,
3279                                          struct scrub_nocow_inode,
3280                                          list);
3281                 list_del_init(&entry->list);
3282                 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
3283                                                  entry->root, nocow_ctx);
3284                 kfree(entry);
3285                 if (ret == COPY_COMPLETE) {
3286                         ret = 0;
3287                         break;
3288                 } else if (ret) {
3289                         break;
3290                 }
3291         }
3292 out:
3293         while (!list_empty(&nocow_ctx->inodes)) {
3294                 struct scrub_nocow_inode *entry;
3295                 entry = list_first_entry(&nocow_ctx->inodes,
3296                                          struct scrub_nocow_inode,
3297                                          list);
3298                 list_del_init(&entry->list);
3299                 kfree(entry);
3300         }
3301         if (trans && !IS_ERR(trans))
3302                 btrfs_end_transaction(trans, root);
3303         if (not_written)
3304                 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
3305                                             num_uncorrectable_read_errors);
3306
3307         btrfs_free_path(path);
3308         kfree(nocow_ctx);
3309
3310         scrub_pending_trans_workers_dec(sctx);
3311 }
3312
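     /*
      * Copy the pages of one inode that back the nocow extent: lock the file
      * range, bail out if an ordered extent is pending or the extent no
      * longer maps to the expected logical address, then write every clean
      * page (reading it first if it is not up to date) to the target with
      * write_page_nocow(). Returns COPY_COMPLETE once the whole range has
      * been written.
      */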
3313 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
3314                                       struct scrub_copy_nocow_ctx *nocow_ctx)
3315 {
3316         struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
3317         struct btrfs_key key;
3318         struct inode *inode;
3319         struct page *page;
3320         struct btrfs_root *local_root;
3321         struct btrfs_ordered_extent *ordered;
3322         struct extent_map *em;
3323         struct extent_state *cached_state = NULL;
3324         struct extent_io_tree *io_tree;
3325         u64 physical_for_dev_replace;
3326         u64 len = nocow_ctx->len;
3327         u64 lockstart = offset, lockend = offset + len - 1;
3328         unsigned long index;
3329         int srcu_index;
3330         int ret = 0;
3331         int err = 0;
3332
3333         key.objectid = root;
3334         key.type = BTRFS_ROOT_ITEM_KEY;
3335         key.offset = (u64)-1;
3336
3337         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
3338
3339         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
3340         if (IS_ERR(local_root)) {
3341                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
3342                 return PTR_ERR(local_root);
3343         }
3344
3345         key.type = BTRFS_INODE_ITEM_KEY;
3346         key.objectid = inum;
3347         key.offset = 0;
3348         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
3349         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
3350         if (IS_ERR(inode))
3351                 return PTR_ERR(inode);
3352
3353         /* Avoid racing with truncate, direct I/O and punch hole. */
3354         mutex_lock(&inode->i_mutex);
3355         inode_dio_wait(inode);
3356
3357         physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3358         io_tree = &BTRFS_I(inode)->io_tree;
3359
3360         lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
3361         ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
3362         if (ordered) {
3363                 btrfs_put_ordered_extent(ordered);
3364                 goto out_unlock;
3365         }
3366
3367         em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
3368         if (IS_ERR(em)) {
3369                 ret = PTR_ERR(em);
3370                 goto out_unlock;
3371         }
3372
3373         /*
3374          * This extent does not actually cover the logical extent anymore,
3375          * move on to the next inode.
3376          */
3377         if (em->block_start > nocow_ctx->logical ||
3378             em->block_start + em->block_len < nocow_ctx->logical + len) {
3379                 free_extent_map(em);
3380                 goto out_unlock;
3381         }
3382         free_extent_map(em);
3383
3384         while (len >= PAGE_CACHE_SIZE) {
3385                 index = offset >> PAGE_CACHE_SHIFT;
3386 again:
3387                 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
3388                 if (!page) {
3389                         btrfs_err(fs_info, "find_or_create_page() failed");
3390                         ret = -ENOMEM;
3391                         goto out;
3392                 }
3393
3394                 if (PageUptodate(page)) {
3395                         if (PageDirty(page))
3396                                 goto next_page;
3397                 } else {
3398                         ClearPageError(page);
3399                         err = extent_read_full_page_nolock(io_tree, page,
3400                                                            btrfs_get_extent,
3401                                                            nocow_ctx->mirror_num);
3402                         if (err) {
3403                                 ret = err;
3404                                 goto next_page;
3405                         }
3406
3407                         lock_page(page);
3408                         /*
3409                          * If the page has been removed from the page cache,
3410                          * the data on it is meaningless, because it may be
3411                          * stale; the new data may have been written into a
3412                          * new page in the page cache.
3413                          */
3414                         if (page->mapping != inode->i_mapping) {
3415                                 unlock_page(page);
3416                                 page_cache_release(page);
3417                                 goto again;
3418                         }
3419                         if (!PageUptodate(page)) {
3420                                 ret = -EIO;
3421                                 goto next_page;
3422                         }
3423                 }
3424                 err = write_page_nocow(nocow_ctx->sctx,
3425                                        physical_for_dev_replace, page);
3426                 if (err)
3427                         ret = err;
3428 next_page:
3429                 unlock_page(page);
3430                 page_cache_release(page);
3431
3432                 if (ret)
3433                         break;
3434
3435                 offset += PAGE_CACHE_SIZE;
3436                 physical_for_dev_replace += PAGE_CACHE_SIZE;
3437                 len -= PAGE_CACHE_SIZE;
3438         }
3439         ret = COPY_COMPLETE;
3440 out_unlock:
3441         unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
3442                              GFP_NOFS);
3443 out:
3444         mutex_unlock(&inode->i_mutex);
3445         iput(inode);
3446         return ret;
3447 }
3448
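     /*
      * Synchronously write one page to @physical_for_dev_replace on the dev
      * replace target device. A failed write is added to the device
      * statistics and returned as -EIO.
      */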
3449 static int write_page_nocow(struct scrub_ctx *sctx,
3450                             u64 physical_for_dev_replace, struct page *page)
3451 {
3452         struct bio *bio;
3453         struct btrfs_device *dev;
3454         int ret;
3455
3456         dev = sctx->wr_ctx.tgtdev;
3457         if (!dev)
3458                 return -EIO;
3459         if (!dev->bdev) {
3460                 printk_ratelimited(KERN_WARNING
3461                         "BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
3462                 return -EIO;
3463         }
3464         bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
3465         if (!bio) {
3466                 spin_lock(&sctx->stat_lock);
3467                 sctx->stat.malloc_errors++;
3468                 spin_unlock(&sctx->stat_lock);
3469                 return -ENOMEM;
3470         }
3471         bio->bi_iter.bi_size = 0;
3472         bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
3473         bio->bi_bdev = dev->bdev;
3474         ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
3475         if (ret != PAGE_CACHE_SIZE) {
3476 leave_with_eio:
3477                 bio_put(bio);
3478                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
3479                 return -EIO;
3480         }
3481
3482         if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
3483                 goto leave_with_eio;
3484
3485         bio_put(bio);
3486         return 0;
3487 }