btrfs: Replace fs_info->scrub_* workqueue with btrfs_workqueue.
fs/btrfs/scrub.c
1 /*
2  * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
21 #include "ctree.h"
22 #include "volumes.h"
23 #include "disk-io.h"
24 #include "ordered-data.h"
25 #include "transaction.h"
26 #include "backref.h"
27 #include "extent_io.h"
28 #include "dev-replace.h"
29 #include "check-integrity.h"
30 #include "rcu-string.h"
31 #include "raid56.h"
32
33 /*
34  * This is only the first step towards a full-featured scrub. It reads all
35  * extents and super blocks and verifies the checksums. In case a bad checksum
36  * is found or the extent cannot be read, good data will be written back if
37  * any can be found.
38  *
39  * Future enhancements:
40  *  - In case an unrepairable extent is encountered, track which files are
41  *    affected and report them
42  *  - track and record media errors, throw out bad devices
43  *  - add a mode to also read unallocated space
44  */
45
46 struct scrub_block;
47 struct scrub_ctx;
48
49 /*
50  * the following three values only influence performance.
51  * The last one configures the number of parallel and outstanding I/O
52  * operations. The first two values configure an upper limit for the number
53  * of (dynamically allocated) pages that are added to a bio.
54  */
55 #define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
56 #define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
57 #define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */
58
59 /*
60  * the following value times PAGE_SIZE needs to be large enough to match the
61  * largest node/leaf/sector size that shall be supported.
62  * Values larger than BTRFS_STRIPE_LEN are not supported.
63  */
64 #define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */
65
66 struct scrub_page {
67         struct scrub_block      *sblock;
68         struct page             *page;
69         struct btrfs_device     *dev;
70         u64                     flags;  /* extent flags */
71         u64                     generation;
72         u64                     logical;
73         u64                     physical;
74         u64                     physical_for_dev_replace;
75         atomic_t                ref_count;
76         struct {
77                 unsigned int    mirror_num:8;
78                 unsigned int    have_csum:1;
79                 unsigned int    io_error:1;
80         };
81         u8                      csum[BTRFS_CSUM_SIZE];
82 };
83
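/*
 * A scrub_bio gathers up to SCRUB_PAGES_PER_RD_BIO/SCRUB_PAGES_PER_WR_BIO
 * scrub_pages that are contiguous on one device and submits them as a
 * single bio; the embedded work item defers completion handling to the
 * scrub workqueue.
 */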
84 struct scrub_bio {
85         int                     index;
86         struct scrub_ctx        *sctx;
87         struct btrfs_device     *dev;
88         struct bio              *bio;
89         int                     err;
90         u64                     logical;
91         u64                     physical;
92 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
93         struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
94 #else
95         struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
96 #endif
97         int                     page_count;
98         int                     next_free;
99         struct btrfs_work_struct
100                                 work;
101 };
102
103 struct scrub_block {
104         struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
105         int                     page_count;
106         atomic_t                outstanding_pages;
107         atomic_t                ref_count; /* free mem on transition to zero */
108         struct scrub_ctx        *sctx;
109         struct {
110                 unsigned int    header_error:1;
111                 unsigned int    checksum_error:1;
112                 unsigned int    no_io_error_seen:1;
113                 unsigned int    generation_error:1; /* also sets header_error */
114         };
115 };
116
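/*
 * Write-side state for device replace: pages that were read (or repaired)
 * are collected into wr_curr_bio and written out to the target device
 * tgtdev.
 */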
117 struct scrub_wr_ctx {
118         struct scrub_bio *wr_curr_bio;
119         struct btrfs_device *tgtdev;
120         int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
121         atomic_t flush_all_writes;
122         struct mutex wr_lock;
123 };
124
125 struct scrub_ctx {
126         struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
127         struct btrfs_root       *dev_root;
128         int                     first_free;
129         int                     curr;
130         atomic_t                bios_in_flight;
131         atomic_t                workers_pending;
132         spinlock_t              list_lock;
133         wait_queue_head_t       list_wait;
134         u16                     csum_size;
135         struct list_head        csum_list;
136         atomic_t                cancel_req;
137         int                     readonly;
138         int                     pages_per_rd_bio;
139         u32                     sectorsize;
140         u32                     nodesize;
141         u32                     leafsize;
142
143         int                     is_dev_replace;
144         struct scrub_wr_ctx     wr_ctx;
145
146         /*
147          * statistics
148          */
149         struct btrfs_scrub_progress stat;
150         spinlock_t              stat_lock;
151 };
152
153 struct scrub_fixup_nodatasum {
154         struct scrub_ctx        *sctx;
155         struct btrfs_device     *dev;
156         u64                     logical;
157         struct btrfs_root       *root;
158         struct btrfs_work_struct
159                                 work;
160         int                     mirror_num;
161 };
162
163 struct scrub_nocow_inode {
164         u64                     inum;
165         u64                     offset;
166         u64                     root;
167         struct list_head        list;
168 };
169
170 struct scrub_copy_nocow_ctx {
171         struct scrub_ctx        *sctx;
172         u64                     logical;
173         u64                     len;
174         int                     mirror_num;
175         u64                     physical_for_dev_replace;
176         struct list_head        inodes;
177         struct btrfs_work_struct
178                                 work;
179 };
180
181 struct scrub_warning {
182         struct btrfs_path       *path;
183         u64                     extent_item_size;
184         char                    *scratch_buf;
185         char                    *msg_buf;
186         const char              *errstr;
187         sector_t                sector;
188         u64                     logical;
189         struct btrfs_device     *dev;
190         int                     msg_bufsize;
191         int                     scratch_bufsize;
192 };
193
194
195 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
196 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
197 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
198 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
199 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
200 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
201                                      struct btrfs_fs_info *fs_info,
202                                      struct scrub_block *original_sblock,
203                                      u64 length, u64 logical,
204                                      struct scrub_block *sblocks_for_recheck);
205 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
206                                 struct scrub_block *sblock, int is_metadata,
207                                 int have_csum, u8 *csum, u64 generation,
208                                 u16 csum_size);
209 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
210                                          struct scrub_block *sblock,
211                                          int is_metadata, int have_csum,
212                                          const u8 *csum, u64 generation,
213                                          u16 csum_size);
214 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
215                                              struct scrub_block *sblock_good,
216                                              int force_write);
217 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
218                                             struct scrub_block *sblock_good,
219                                             int page_num, int force_write);
220 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
221 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
222                                            int page_num);
223 static int scrub_checksum_data(struct scrub_block *sblock);
224 static int scrub_checksum_tree_block(struct scrub_block *sblock);
225 static int scrub_checksum_super(struct scrub_block *sblock);
226 static void scrub_block_get(struct scrub_block *sblock);
227 static void scrub_block_put(struct scrub_block *sblock);
228 static void scrub_page_get(struct scrub_page *spage);
229 static void scrub_page_put(struct scrub_page *spage);
230 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
231                                     struct scrub_page *spage);
232 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
233                        u64 physical, struct btrfs_device *dev, u64 flags,
234                        u64 gen, int mirror_num, u8 *csum, int force,
235                        u64 physical_for_dev_replace);
236 static void scrub_bio_end_io(struct bio *bio, int err);
237 static void scrub_bio_end_io_worker(struct btrfs_work_struct *work);
238 static void scrub_block_complete(struct scrub_block *sblock);
239 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
240                                u64 extent_logical, u64 extent_len,
241                                u64 *extent_physical,
242                                struct btrfs_device **extent_dev,
243                                int *extent_mirror_num);
244 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
245                               struct scrub_wr_ctx *wr_ctx,
246                               struct btrfs_fs_info *fs_info,
247                               struct btrfs_device *dev,
248                               int is_dev_replace);
249 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
250 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
251                                     struct scrub_page *spage);
252 static void scrub_wr_submit(struct scrub_ctx *sctx);
253 static void scrub_wr_bio_end_io(struct bio *bio, int err);
254 static void scrub_wr_bio_end_io_worker(struct btrfs_work_struct *work);
255 static int write_page_nocow(struct scrub_ctx *sctx,
256                             u64 physical_for_dev_replace, struct page *page);
257 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
258                                       struct scrub_copy_nocow_ctx *ctx);
259 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
260                             int mirror_num, u64 physical_for_dev_replace);
261 static void copy_nocow_pages_worker(struct btrfs_work_struct *work);
262 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
263 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
264
265
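/*
 * The following two helpers count scrub bios in flight; every completion
 * wakes sctx->list_wait so that code draining all outstanding I/O (e.g.
 * before pausing or freeing the context) can make progress.
 */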
266 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
267 {
268         atomic_inc(&sctx->bios_in_flight);
269 }
270
271 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
272 {
273         atomic_dec(&sctx->bios_in_flight);
274         wake_up(&sctx->list_wait);
275 }
276
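/*
 * Wait as long as a pause of the scrub is requested. Called with
 * fs_info->scrub_lock held; the lock is dropped while sleeping and
 * re-acquired before returning.
 */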
277 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
278 {
279         while (atomic_read(&fs_info->scrub_pause_req)) {
280                 mutex_unlock(&fs_info->scrub_lock);
281                 wait_event(fs_info->scrub_pause_wait,
282                    atomic_read(&fs_info->scrub_pause_req) == 0);
283                 mutex_lock(&fs_info->scrub_lock);
284         }
285 }
286
287 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
288 {
289         atomic_inc(&fs_info->scrubs_paused);
290         wake_up(&fs_info->scrub_pause_wait);
291
292         mutex_lock(&fs_info->scrub_lock);
293         __scrub_blocked_if_needed(fs_info);
294         atomic_dec(&fs_info->scrubs_paused);
295         mutex_unlock(&fs_info->scrub_lock);
296
297         wake_up(&fs_info->scrub_pause_wait);
298 }
299
300 /*
301  * used for workers that require transaction commits (i.e., for the
302  * NOCOW case)
303  */
304 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
305 {
306         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
307
308         /*
309          * increment scrubs_running to prevent cancel requests from
310          * completing as long as a worker is running. we must also
311          * increment scrubs_paused to prevent deadlocking on pause
312          * requests used for transaction commits (as the worker uses a
313          * transaction context). it is safe to regard the worker
314          * as paused for all practical matters. effectively, we only
315          * prevent cancellation requests from completing.
316          */
317         mutex_lock(&fs_info->scrub_lock);
318         atomic_inc(&fs_info->scrubs_running);
319         atomic_inc(&fs_info->scrubs_paused);
320         mutex_unlock(&fs_info->scrub_lock);
321
322         /*
323          * checking the @scrubs_running == @scrubs_paused condition
324          * inside wait_event() is not an atomic operation,
325          * which means we may inc/dec @scrubs_running/@scrubs_paused
326          * at any time. Let's wake up @scrub_pause_wait as much as
327          * we can so that a blocked transaction commit waits less.
328          */
329         wake_up(&fs_info->scrub_pause_wait);
330
331         atomic_inc(&sctx->workers_pending);
332 }
333
334 /* used for workers that require transaction commits */
335 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
336 {
337         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
338
339         /*
340          * see scrub_pending_trans_workers_inc() for why we're pretending
341          * to be paused in the scrub counters
342          */
343         mutex_lock(&fs_info->scrub_lock);
344         atomic_dec(&fs_info->scrubs_running);
345         atomic_dec(&fs_info->scrubs_paused);
346         mutex_unlock(&fs_info->scrub_lock);
347         atomic_dec(&sctx->workers_pending);
348         wake_up(&fs_info->scrub_pause_wait);
349         wake_up(&sctx->list_wait);
350 }
351
352 static void scrub_free_csums(struct scrub_ctx *sctx)
353 {
354         while (!list_empty(&sctx->csum_list)) {
355                 struct btrfs_ordered_sum *sum;
356                 sum = list_first_entry(&sctx->csum_list,
357                                        struct btrfs_ordered_sum, list);
358                 list_del(&sum->list);
359                 kfree(sum);
360         }
361 }
362
363 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
364 {
365         int i;
366
367         if (!sctx)
368                 return;
369
370         scrub_free_wr_ctx(&sctx->wr_ctx);
371
372         /* this can happen when scrub is cancelled */
373         if (sctx->curr != -1) {
374                 struct scrub_bio *sbio = sctx->bios[sctx->curr];
375
376                 for (i = 0; i < sbio->page_count; i++) {
377                         WARN_ON(!sbio->pagev[i]->page);
378                         scrub_block_put(sbio->pagev[i]->sblock);
379                 }
380                 bio_put(sbio->bio);
381         }
382
383         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
384                 struct scrub_bio *sbio = sctx->bios[i];
385
386                 if (!sbio)
387                         break;
388                 kfree(sbio);
389         }
390
391         scrub_free_csums(sctx);
392         kfree(sctx);
393 }
394
395 static noinline_for_stack
396 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
397 {
398         struct scrub_ctx *sctx;
399         int             i;
400         struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
401         int pages_per_rd_bio;
402         int ret;
403
404         /*
405          * the setting of pages_per_rd_bio is correct for scrub but might
406          * be wrong for the dev_replace code where we might read from
407          * different devices in the initial huge bios. However, that
408          * code is able to correctly handle the case when adding a page
409          * to a bio fails.
410          */
411         if (dev->bdev)
412                 pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
413                                          bio_get_nr_vecs(dev->bdev));
414         else
415                 pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
416         sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
417         if (!sctx)
418                 goto nomem;
419         sctx->is_dev_replace = is_dev_replace;
420         sctx->pages_per_rd_bio = pages_per_rd_bio;
421         sctx->curr = -1;
422         sctx->dev_root = dev->dev_root;
423         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
424                 struct scrub_bio *sbio;
425
426                 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
427                 if (!sbio)
428                         goto nomem;
429                 sctx->bios[i] = sbio;
430
431                 sbio->index = i;
432                 sbio->sctx = sctx;
433                 sbio->page_count = 0;
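                /*
                 * set up the completion work once; it is queued on the
                 * scrub workqueue from the bio's end_io callback so that
                 * scrub_bio_end_io_worker() runs in process context.
                 */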
434                 btrfs_init_work(&sbio->work, scrub_bio_end_io_worker,
435                                 NULL, NULL);
436
437                 if (i != SCRUB_BIOS_PER_SCTX - 1)
438                         sctx->bios[i]->next_free = i + 1;
439                 else
440                         sctx->bios[i]->next_free = -1;
441         }
442         sctx->first_free = 0;
443         sctx->nodesize = dev->dev_root->nodesize;
444         sctx->leafsize = dev->dev_root->leafsize;
445         sctx->sectorsize = dev->dev_root->sectorsize;
446         atomic_set(&sctx->bios_in_flight, 0);
447         atomic_set(&sctx->workers_pending, 0);
448         atomic_set(&sctx->cancel_req, 0);
449         sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
450         INIT_LIST_HEAD(&sctx->csum_list);
451
452         spin_lock_init(&sctx->list_lock);
453         spin_lock_init(&sctx->stat_lock);
454         init_waitqueue_head(&sctx->list_wait);
455
456         ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
457                                  fs_info->dev_replace.tgtdev, is_dev_replace);
458         if (ret) {
459                 scrub_free_ctx(sctx);
460                 return ERR_PTR(ret);
461         }
462         return sctx;
463
464 nomem:
465         scrub_free_ctx(sctx);
466         return ERR_PTR(-ENOMEM);
467 }
468
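/*
 * Callback for iterate_extent_inodes(): resolve the paths of every file
 * that references the affected inode and print one warning line per path
 * so the user can tell which files are hit by the error.
 */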
469 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
470                                      void *warn_ctx)
471 {
472         u64 isize;
473         u32 nlink;
474         int ret;
475         int i;
476         struct extent_buffer *eb;
477         struct btrfs_inode_item *inode_item;
478         struct scrub_warning *swarn = warn_ctx;
479         struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
480         struct inode_fs_paths *ipath = NULL;
481         struct btrfs_root *local_root;
482         struct btrfs_key root_key;
483
484         root_key.objectid = root;
485         root_key.type = BTRFS_ROOT_ITEM_KEY;
486         root_key.offset = (u64)-1;
487         local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
488         if (IS_ERR(local_root)) {
489                 ret = PTR_ERR(local_root);
490                 goto err;
491         }
492
493         ret = inode_item_info(inum, 0, local_root, swarn->path);
494         if (ret) {
495                 btrfs_release_path(swarn->path);
496                 goto err;
497         }
498
499         eb = swarn->path->nodes[0];
500         inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
501                                         struct btrfs_inode_item);
502         isize = btrfs_inode_size(eb, inode_item);
503         nlink = btrfs_inode_nlink(eb, inode_item);
504         btrfs_release_path(swarn->path);
505
506         ipath = init_ipath(4096, local_root, swarn->path);
507         if (IS_ERR(ipath)) {
508                 ret = PTR_ERR(ipath);
509                 ipath = NULL;
510                 goto err;
511         }
512         ret = paths_from_inode(inum, ipath);
513
514         if (ret < 0)
515                 goto err;
516
517         /*
518          * we deliberately ignore the fact that ipath might have been too
519          * small to hold all of the paths here
520          */
521         for (i = 0; i < ipath->fspath->elem_cnt; ++i)
522                 printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
523                         "%s, sector %llu, root %llu, inode %llu, offset %llu, "
524                         "length %llu, links %u (path: %s)\n", swarn->errstr,
525                         swarn->logical, rcu_str_deref(swarn->dev->name),
526                         (unsigned long long)swarn->sector, root, inum, offset,
527                         min(isize - offset, (u64)PAGE_SIZE), nlink,
528                         (char *)(unsigned long)ipath->fspath->val[i]);
529
530         free_ipath(ipath);
531         return 0;
532
533 err:
534         printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
535                 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
536                 "resolving failed with ret=%d\n", swarn->errstr,
537                 swarn->logical, rcu_str_deref(swarn->dev->name),
538                 (unsigned long long)swarn->sector, root, inum, offset, ret);
539
540         free_ipath(ipath);
541         return 0;
542 }
543
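/*
 * Print a warning for a bad block: the logical address is resolved back
 * either to the owning tree and level (metadata) or, via the extent item
 * and its backrefs, to the inodes and paths that reference it (data).
 */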
544 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
545 {
546         struct btrfs_device *dev;
547         struct btrfs_fs_info *fs_info;
548         struct btrfs_path *path;
549         struct btrfs_key found_key;
550         struct extent_buffer *eb;
551         struct btrfs_extent_item *ei;
552         struct scrub_warning swarn;
553         unsigned long ptr = 0;
554         u64 extent_item_pos;
555         u64 flags = 0;
556         u64 ref_root;
557         u32 item_size;
558         u8 ref_level;
559         const int bufsize = 4096;
560         int ret;
561
562         WARN_ON(sblock->page_count < 1);
563         dev = sblock->pagev[0]->dev;
564         fs_info = sblock->sctx->dev_root->fs_info;
565
566         path = btrfs_alloc_path();
567
568         swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
569         swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
570         swarn.sector = (sblock->pagev[0]->physical) >> 9;
571         swarn.logical = sblock->pagev[0]->logical;
572         swarn.errstr = errstr;
573         swarn.dev = NULL;
574         swarn.msg_bufsize = bufsize;
575         swarn.scratch_bufsize = bufsize;
576
577         if (!path || !swarn.scratch_buf || !swarn.msg_buf)
578                 goto out;
579
580         ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
581                                   &flags);
582         if (ret < 0)
583                 goto out;
584
585         extent_item_pos = swarn.logical - found_key.objectid;
586         swarn.extent_item_size = found_key.offset;
587
588         eb = path->nodes[0];
589         ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
590         item_size = btrfs_item_size_nr(eb, path->slots[0]);
591
592         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
593                 do {
594                         ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
595                                                         &ref_root, &ref_level);
596                         printk_in_rcu(KERN_WARNING
597                                 "BTRFS: %s at logical %llu on dev %s, "
598                                 "sector %llu: metadata %s (level %d) in tree "
599                                 "%llu\n", errstr, swarn.logical,
600                                 rcu_str_deref(dev->name),
601                                 (unsigned long long)swarn.sector,
602                                 ref_level ? "node" : "leaf",
603                                 ret < 0 ? -1 : ref_level,
604                                 ret < 0 ? -1 : ref_root);
605                 } while (ret != 1);
606                 btrfs_release_path(path);
607         } else {
608                 btrfs_release_path(path);
609                 swarn.path = path;
610                 swarn.dev = dev;
611                 iterate_extent_inodes(fs_info, found_key.objectid,
612                                         extent_item_pos, 1,
613                                         scrub_print_warning_inode, &swarn);
614         }
615
616 out:
617         btrfs_free_path(path);
618         kfree(swarn.scratch_buf);
619         kfree(swarn.msg_buf);
620 }
621
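/*
 * Callback for iterate_inodes_from_logical(), used by the nodatasum fixup
 * worker below: if a clean, uptodate copy of the page is cached, write it
 * over the bad sector directly; otherwise force a read of the failing
 * mirror so that the generic read-repair path rewrites the sector.
 */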
622 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
623 {
624         struct page *page = NULL;
625         unsigned long index;
626         struct scrub_fixup_nodatasum *fixup = fixup_ctx;
627         int ret;
628         int corrected = 0;
629         struct btrfs_key key;
630         struct inode *inode = NULL;
631         struct btrfs_fs_info *fs_info;
632         u64 end = offset + PAGE_SIZE - 1;
633         struct btrfs_root *local_root;
634         int srcu_index;
635
636         key.objectid = root;
637         key.type = BTRFS_ROOT_ITEM_KEY;
638         key.offset = (u64)-1;
639
640         fs_info = fixup->root->fs_info;
641         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
642
643         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
644         if (IS_ERR(local_root)) {
645                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
646                 return PTR_ERR(local_root);
647         }
648
649         key.type = BTRFS_INODE_ITEM_KEY;
650         key.objectid = inum;
651         key.offset = 0;
652         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
653         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
654         if (IS_ERR(inode))
655                 return PTR_ERR(inode);
656
657         index = offset >> PAGE_CACHE_SHIFT;
658
659         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
660         if (!page) {
661                 ret = -ENOMEM;
662                 goto out;
663         }
664
665         if (PageUptodate(page)) {
666                 if (PageDirty(page)) {
667                         /*
668                          * we need to write the data to the defective sector. the
669                          * data that was in that sector is not in memory,
670                          * because the page was modified. we must not write the
671                          * modified page to that sector.
672                          *
673                          * TODO: what could be done here: wait for the delalloc
674                          *       runner to write out that page (might involve
675                          *       COW) and see whether the sector is still
676                          *       referenced afterwards.
677                          *
678                          * For the time being, we'll treat this error as
679                          * uncorrectable, although there is a chance that a
680                          * later scrub will find the bad sector again and that
681                          * there will be no dirty page in memory by then.
682                          */
683                         ret = -EIO;
684                         goto out;
685                 }
686                 fs_info = BTRFS_I(inode)->root->fs_info;
687                 ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
688                                         fixup->logical, page,
689                                         fixup->mirror_num);
690                 unlock_page(page);
691                 corrected = !ret;
692         } else {
693                 /*
694                  * we need to get good data first. the general readpage path
695                  * will call repair_io_failure for us, we just have to make
696                  * sure we read the bad mirror.
697                  */
698                 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
699                                         EXTENT_DAMAGED, GFP_NOFS);
700                 if (ret) {
701                         /* set_extent_bits should give proper error */
702                         WARN_ON(ret > 0);
703                         if (ret > 0)
704                                 ret = -EFAULT;
705                         goto out;
706                 }
707
708                 ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
709                                                 btrfs_get_extent,
710                                                 fixup->mirror_num);
711                 wait_on_page_locked(page);
712
713                 corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
714                                                 end, EXTENT_DAMAGED, 0, NULL);
715                 if (!corrected)
716                         clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
717                                                 EXTENT_DAMAGED, GFP_NOFS);
718         }
719
720 out:
721         if (page)
722                 put_page(page);
723         if (inode)
724                 iput(inode);
725
726         if (ret < 0)
727                 return ret;
728
729         if (ret == 0 && corrected) {
730                 /*
731                  * to this extent, so make iterate_extent_inodes stop
732                  * to this extent. so make iterate_extent_inodes stop
733                  */
734                 return 1;
735         }
736
737         return -EIO;
738 }
739
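/*
 * Worker for repairing data without checksum (nodatasum): runs with a
 * joined transaction and visits all inodes referencing the bad logical
 * address through scrub_fixup_readpage() above.
 */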
740 static void scrub_fixup_nodatasum(struct btrfs_work_struct *work)
741 {
742         int ret;
743         struct scrub_fixup_nodatasum *fixup;
744         struct scrub_ctx *sctx;
745         struct btrfs_trans_handle *trans = NULL;
746         struct btrfs_path *path;
747         int uncorrectable = 0;
748
749         fixup = container_of(work, struct scrub_fixup_nodatasum, work);
750         sctx = fixup->sctx;
751
752         path = btrfs_alloc_path();
753         if (!path) {
754                 spin_lock(&sctx->stat_lock);
755                 ++sctx->stat.malloc_errors;
756                 spin_unlock(&sctx->stat_lock);
757                 uncorrectable = 1;
758                 goto out;
759         }
760
761         trans = btrfs_join_transaction(fixup->root);
762         if (IS_ERR(trans)) {
763                 uncorrectable = 1;
764                 goto out;
765         }
766
767         /*
768          * the idea is to trigger a regular read through the standard path. we
769          * read a page from the (failed) logical address by specifying the
770          * corresponding copynum of the failed sector. thus, that readpage is
771          * expected to fail.
772          * that is the point where on-the-fly error correction will kick in
773          * (once it's finished) and rewrite the failed sector if a good copy
774          * can be found.
775          */
776         ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
777                                                 path, scrub_fixup_readpage,
778                                                 fixup);
779         if (ret < 0) {
780                 uncorrectable = 1;
781                 goto out;
782         }
783         WARN_ON(ret != 1);
784
785         spin_lock(&sctx->stat_lock);
786         ++sctx->stat.corrected_errors;
787         spin_unlock(&sctx->stat_lock);
788
789 out:
790         if (trans && !IS_ERR(trans))
791                 btrfs_end_transaction(trans, fixup->root);
792         if (uncorrectable) {
793                 spin_lock(&sctx->stat_lock);
794                 ++sctx->stat.uncorrectable_errors;
795                 spin_unlock(&sctx->stat_lock);
796                 btrfs_dev_replace_stats_inc(
797                         &sctx->dev_root->fs_info->dev_replace.
798                         num_uncorrectable_read_errors);
799                 printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
800                     "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
801                         fixup->logical, rcu_str_deref(fixup->dev->name));
802         }
803
804         btrfs_free_path(path);
805         kfree(fixup);
806
807         scrub_pending_trans_workers_dec(sctx);
808 }
809
810 /*
811  * scrub_handle_errored_block gets called when either verification of the
812  * pages failed or the bio failed to read, e.g. with EIO. In the latter
813  * case, this function handles all pages in the bio, even though only one
814  * may be bad.
815  * The goal of this function is to repair the errored block by using the
816  * contents of one of the mirrors.
817  */
818 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
819 {
820         struct scrub_ctx *sctx = sblock_to_check->sctx;
821         struct btrfs_device *dev;
822         struct btrfs_fs_info *fs_info;
823         u64 length;
824         u64 logical;
825         u64 generation;
826         unsigned int failed_mirror_index;
827         unsigned int is_metadata;
828         unsigned int have_csum;
829         u8 *csum;
830         struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
831         struct scrub_block *sblock_bad;
832         int ret;
833         int mirror_index;
834         int page_num;
835         int success;
836         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
837                                       DEFAULT_RATELIMIT_BURST);
838
839         BUG_ON(sblock_to_check->page_count < 1);
840         fs_info = sctx->dev_root->fs_info;
841         if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
842                 /*
843                  * if we find an error in a super block, we just report it.
844                  * Super blocks get rewritten with the next transaction
845                  * commit anyway.
846                  */
847                 spin_lock(&sctx->stat_lock);
848                 ++sctx->stat.super_errors;
849                 spin_unlock(&sctx->stat_lock);
850                 return 0;
851         }
852         length = sblock_to_check->page_count * PAGE_SIZE;
853         logical = sblock_to_check->pagev[0]->logical;
854         generation = sblock_to_check->pagev[0]->generation;
855         BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
856         failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
857         is_metadata = !(sblock_to_check->pagev[0]->flags &
858                         BTRFS_EXTENT_FLAG_DATA);
859         have_csum = sblock_to_check->pagev[0]->have_csum;
860         csum = sblock_to_check->pagev[0]->csum;
861         dev = sblock_to_check->pagev[0]->dev;
862
863         if (sctx->is_dev_replace && !is_metadata && !have_csum) {
864                 sblocks_for_recheck = NULL;
865                 goto nodatasum_case;
866         }
867
868         /*
869          * read all mirrors one after the other. This includes re-reading
870          * the extent or metadata block that failed (which is the reason
871          * this fixup code was called) another time, page by page, in
872          * order to know which pages caused I/O errors and which ones
873          * are good (for all mirrors).
874          * The goal is to handle the situation when more than one
875          * mirror contains I/O errors, but the errors do not
876          * overlap, i.e. the data can be repaired by selecting the
877          * pages from those mirrors without I/O error on the
878          * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
879          * would be that mirror #1 has an I/O error on the first page,
880          * the second page is good, and mirror #2 has an I/O error on
881          * the second page, but the first page is good.
882          * Then the first page of the first mirror can be repaired by
883          * taking the first page of the second mirror, and the
884          * second page of the second mirror can be repaired by
885          * copying the contents of the 2nd page of the 1st mirror.
886          * One more note: if the pages of one mirror contain I/O
887          * errors, the checksum cannot be verified. In order to get
888          * the best data for repairing, the first attempt is to find
889          * a mirror without I/O errors and with a validated checksum.
890          * Only if this is not possible, the pages are picked from
891          * mirrors with I/O errors without considering the checksum.
892          * If the latter is the case, at the end, the checksum of the
893          * repaired area is verified in order to correctly maintain
894          * the statistics.
895          */
896
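        /*
         * allocate one scrub_block per possible mirror; the entry at index
         * failed_mirror_index describes the block that originally failed
         * and is rechecked first.
         */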
897         sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
898                                      sizeof(*sblocks_for_recheck),
899                                      GFP_NOFS);
900         if (!sblocks_for_recheck) {
901                 spin_lock(&sctx->stat_lock);
902                 sctx->stat.malloc_errors++;
903                 sctx->stat.read_errors++;
904                 sctx->stat.uncorrectable_errors++;
905                 spin_unlock(&sctx->stat_lock);
906                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
907                 goto out;
908         }
909
910         /* setup the context, map the logical blocks and alloc the pages */
911         ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
912                                         logical, sblocks_for_recheck);
913         if (ret) {
914                 spin_lock(&sctx->stat_lock);
915                 sctx->stat.read_errors++;
916                 sctx->stat.uncorrectable_errors++;
917                 spin_unlock(&sctx->stat_lock);
918                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
919                 goto out;
920         }
921         BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
922         sblock_bad = sblocks_for_recheck + failed_mirror_index;
923
924         /* build and submit the bios for the failed mirror, check checksums */
925         scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
926                             csum, generation, sctx->csum_size);
927
928         if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
929             sblock_bad->no_io_error_seen) {
930                 /*
931                  * the error disappeared after reading page by page, or
932                  * the area was part of a huge bio and other parts of the
933                  * bio caused I/O errors, or the block layer merged several
934                  * read requests into one and the error is caused by a
935                  * different bio (usually one of the two latter cases is
936                  * the cause)
937                  */
938                 spin_lock(&sctx->stat_lock);
939                 sctx->stat.unverified_errors++;
940                 spin_unlock(&sctx->stat_lock);
941
942                 if (sctx->is_dev_replace)
943                         scrub_write_block_to_dev_replace(sblock_bad);
944                 goto out;
945         }
946
947         if (!sblock_bad->no_io_error_seen) {
948                 spin_lock(&sctx->stat_lock);
949                 sctx->stat.read_errors++;
950                 spin_unlock(&sctx->stat_lock);
951                 if (__ratelimit(&_rs))
952                         scrub_print_warning("i/o error", sblock_to_check);
953                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
954         } else if (sblock_bad->checksum_error) {
955                 spin_lock(&sctx->stat_lock);
956                 sctx->stat.csum_errors++;
957                 spin_unlock(&sctx->stat_lock);
958                 if (__ratelimit(&_rs))
959                         scrub_print_warning("checksum error", sblock_to_check);
960                 btrfs_dev_stat_inc_and_print(dev,
961                                              BTRFS_DEV_STAT_CORRUPTION_ERRS);
962         } else if (sblock_bad->header_error) {
963                 spin_lock(&sctx->stat_lock);
964                 sctx->stat.verify_errors++;
965                 spin_unlock(&sctx->stat_lock);
966                 if (__ratelimit(&_rs))
967                         scrub_print_warning("checksum/header error",
968                                             sblock_to_check);
969                 if (sblock_bad->generation_error)
970                         btrfs_dev_stat_inc_and_print(dev,
971                                 BTRFS_DEV_STAT_GENERATION_ERRS);
972                 else
973                         btrfs_dev_stat_inc_and_print(dev,
974                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
975         }
976
977         if (sctx->readonly) {
978                 ASSERT(!sctx->is_dev_replace);
979                 goto out;
980         }
981
982         if (!is_metadata && !have_csum) {
983                 struct scrub_fixup_nodatasum *fixup_nodatasum;
984
985 nodatasum_case:
986                 WARN_ON(sctx->is_dev_replace);
987
988                 /*
989                  * !is_metadata and !have_csum, this means that the data
990                  * might not be COW'ed, that it might be modified
991                  * concurrently. The general strategy to work on the
992                  * commit root does not help in the case when COW is not
993                  * used.
994                  */
995                 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
996                 if (!fixup_nodatasum)
997                         goto did_not_correct_error;
998                 fixup_nodatasum->sctx = sctx;
999                 fixup_nodatasum->dev = dev;
1000                 fixup_nodatasum->logical = logical;
1001                 fixup_nodatasum->root = fs_info->extent_root;
1002                 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
1003                 scrub_pending_trans_workers_inc(sctx);
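                /*
                 * the fixup needs a transaction context, so hand it off to
                 * a worker; the pending counter bumped above keeps cancel
                 * requests from completing while the worker runs.
                 */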
1004                 btrfs_init_work(&fixup_nodatasum->work, scrub_fixup_nodatasum,
1005                                 NULL, NULL);
1006                 btrfs_queue_work(fs_info->scrub_workers,
1007                                  &fixup_nodatasum->work);
1008                 goto out;
1009         }
1010
1011         /*
1012          * now build and submit the bios for the other mirrors, check
1013          * checksums.
1014          * First try to pick the mirror which is completely without I/O
1015          * errors and also does not have a checksum error.
1016          * If one is found, and if a checksum is present, the full block
1017          * that is known to contain an error is rewritten. Afterwards
1018          * the block is known to be corrected.
1019          * If a mirror is found which is completely correct, and no
1020          * checksum is present, only those pages are rewritten that had
1021          * an I/O error in the block to be repaired, since it cannot be
1022          * determined, which copy of the other pages is better (and it
1023          * could happen otherwise that a correct page would be
1024          * overwritten by a bad one).
1025          */
1026         for (mirror_index = 0;
1027              mirror_index < BTRFS_MAX_MIRRORS &&
1028              sblocks_for_recheck[mirror_index].page_count > 0;
1029              mirror_index++) {
1030                 struct scrub_block *sblock_other;
1031
1032                 if (mirror_index == failed_mirror_index)
1033                         continue;
1034                 sblock_other = sblocks_for_recheck + mirror_index;
1035
1036                 /* build and submit the bios, check checksums */
1037                 scrub_recheck_block(fs_info, sblock_other, is_metadata,
1038                                     have_csum, csum, generation,
1039                                     sctx->csum_size);
1040
1041                 if (!sblock_other->header_error &&
1042                     !sblock_other->checksum_error &&
1043                     sblock_other->no_io_error_seen) {
1044                         if (sctx->is_dev_replace) {
1045                                 scrub_write_block_to_dev_replace(sblock_other);
1046                         } else {
1047                                 int force_write = is_metadata || have_csum;
1048
1049                                 ret = scrub_repair_block_from_good_copy(
1050                                                 sblock_bad, sblock_other,
1051                                                 force_write);
1052                         }
1053                         if (0 == ret)
1054                                 goto corrected_error;
1055                 }
1056         }
1057
1058         /*
1059          * for dev_replace, pick good pages and write to the target device.
1060          */
1061         if (sctx->is_dev_replace) {
1062                 success = 1;
1063                 for (page_num = 0; page_num < sblock_bad->page_count;
1064                      page_num++) {
1065                         int sub_success;
1066
1067                         sub_success = 0;
1068                         for (mirror_index = 0;
1069                              mirror_index < BTRFS_MAX_MIRRORS &&
1070                              sblocks_for_recheck[mirror_index].page_count > 0;
1071                              mirror_index++) {
1072                                 struct scrub_block *sblock_other =
1073                                         sblocks_for_recheck + mirror_index;
1074                                 struct scrub_page *page_other =
1075                                         sblock_other->pagev[page_num];
1076
1077                                 if (!page_other->io_error) {
1078                                         ret = scrub_write_page_to_dev_replace(
1079                                                         sblock_other, page_num);
1080                                         if (ret == 0) {
1081                                                 /* succeeded for this page */
1082                                                 sub_success = 1;
1083                                                 break;
1084                                         } else {
1085                                                 btrfs_dev_replace_stats_inc(
1086                                                         &sctx->dev_root->
1087                                                         fs_info->dev_replace.
1088                                                         num_write_errors);
1089                                         }
1090                                 }
1091                         }
1092
1093                         if (!sub_success) {
1094                                 /*
1095                                  * did not find a mirror to fetch the page
1096                                  * from. scrub_write_page_to_dev_replace()
1097                                  * handles this case (page->io_error) by
1098                                  * filling the block with zeros before
1099                                  * submitting the write request
1100                                  */
1101                                 success = 0;
1102                                 ret = scrub_write_page_to_dev_replace(
1103                                                 sblock_bad, page_num);
1104                                 if (ret)
1105                                         btrfs_dev_replace_stats_inc(
1106                                                 &sctx->dev_root->fs_info->
1107                                                 dev_replace.num_write_errors);
1108                         }
1109                 }
1110
1111                 goto out;
1112         }
1113
1114         /*
1115          * for regular scrub, repair those pages that are errored.
1116          * In case of I/O errors in the area that is supposed to be
1117          * repaired, continue by picking good copies of those pages.
1118          * Select the good pages from mirrors to rewrite bad pages from
1119          * the area to fix. Afterwards verify the checksum of the block
1120          * that is supposed to be repaired. This verification step is
1121          * only done for the purpose of statistics counting and for the
1122          * final scrub report on whether errors remain.
1123          * A perfect algorithm could make use of the checksum and try
1124          * all possible combinations of pages from the different mirrors
1125          * until the checksum verification succeeds. For example, when
1126          * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1127          * of mirror #2 is readable but the final checksum test fails,
1128          * then the 2nd page of mirror #3 could be tried to see whether
1129          * the final checksum then succeeds. But this would be a rare
1130          * exception and is therefore not implemented. At least this
1131          * avoids overwriting the good copy.
1132          * A more useful improvement would be to pick the sectors
1133          * without I/O error based on sector sizes (512 bytes on legacy
1134          * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1135          * mirror could be repaired by taking 512 bytes of a different
1136          * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1137          * area are unreadable.
1138          */
1139
1140         /* can only fix I/O errors from here on */
1141         if (sblock_bad->no_io_error_seen)
1142                 goto did_not_correct_error;
1143
1144         success = 1;
1145         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1146                 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1147
1148                 if (!page_bad->io_error)
1149                         continue;
1150
1151                 for (mirror_index = 0;
1152                      mirror_index < BTRFS_MAX_MIRRORS &&
1153                      sblocks_for_recheck[mirror_index].page_count > 0;
1154                      mirror_index++) {
1155                         struct scrub_block *sblock_other = sblocks_for_recheck +
1156                                                            mirror_index;
1157                         struct scrub_page *page_other = sblock_other->pagev[
1158                                                         page_num];
1159
1160                         if (!page_other->io_error) {
1161                                 ret = scrub_repair_page_from_good_copy(
1162                                         sblock_bad, sblock_other, page_num, 0);
1163                                 if (0 == ret) {
1164                                         page_bad->io_error = 0;
1165                                         break; /* succeeded for this page */
1166                                 }
1167                         }
1168                 }
1169
1170                 if (page_bad->io_error) {
1171                         /* did not find a mirror to copy the page from */
1172                         success = 0;
1173                 }
1174         }
1175
1176         if (success) {
1177                 if (is_metadata || have_csum) {
1178                         /*
1179                          * need to verify the checksum now that all
1180                          * sectors on disk are repaired (the write
1181                          * request for data to be repaired is on its way).
1182                          * Just be lazy and use scrub_recheck_block()
1183                          * which re-reads the data before the checksum
1184                          * is verified, but most likely the data comes out
1185                          * of the page cache.
1186                          */
1187                         scrub_recheck_block(fs_info, sblock_bad,
1188                                             is_metadata, have_csum, csum,
1189                                             generation, sctx->csum_size);
1190                         if (!sblock_bad->header_error &&
1191                             !sblock_bad->checksum_error &&
1192                             sblock_bad->no_io_error_seen)
1193                                 goto corrected_error;
1194                         else
1195                                 goto did_not_correct_error;
1196                 } else {
1197 corrected_error:
1198                         spin_lock(&sctx->stat_lock);
1199                         sctx->stat.corrected_errors++;
1200                         spin_unlock(&sctx->stat_lock);
1201                         printk_ratelimited_in_rcu(KERN_ERR
1202                                 "BTRFS: fixed up error at logical %llu on dev %s\n",
1203                                 logical, rcu_str_deref(dev->name));
1204                 }
1205         } else {
1206 did_not_correct_error:
1207                 spin_lock(&sctx->stat_lock);
1208                 sctx->stat.uncorrectable_errors++;
1209                 spin_unlock(&sctx->stat_lock);
1210                 printk_ratelimited_in_rcu(KERN_ERR
1211                         "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
1212                         logical, rcu_str_deref(dev->name));
1213         }
1214
1215 out:
1216         if (sblocks_for_recheck) {
1217                 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1218                      mirror_index++) {
1219                         struct scrub_block *sblock = sblocks_for_recheck +
1220                                                      mirror_index;
1221                         int page_index;
1222
1223                         for (page_index = 0; page_index < sblock->page_count;
1224                              page_index++) {
1225                                 sblock->pagev[page_index]->sblock = NULL;
1226                                 scrub_page_put(sblock->pagev[page_index]);
1227                         }
1228                 }
1229                 kfree(sblocks_for_recheck);
1230         }
1231
1232         return 0;
1233 }
1234
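/*
 * Map the logical range of the failed block in PAGE_SIZE chunks and, for
 * every stripe returned by btrfs_map_block(), add a page to the scrub_block
 * of the corresponding mirror so that each mirror can be re-read and
 * verified independently.
 */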
1235 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
1236                                      struct btrfs_fs_info *fs_info,
1237                                      struct scrub_block *original_sblock,
1238                                      u64 length, u64 logical,
1239                                      struct scrub_block *sblocks_for_recheck)
1240 {
1241         int page_index;
1242         int mirror_index;
1243         int ret;
1244
1245         /*
1246          * note: the two members ref_count and outstanding_pages
1247          * are not used (and not set) in the blocks that are used for
1248          * the recheck procedure
1249          */
1250
1251         page_index = 0;
1252         while (length > 0) {
1253                 u64 sublen = min_t(u64, length, PAGE_SIZE);
1254                 u64 mapped_length = sublen;
1255                 struct btrfs_bio *bbio = NULL;
1256
1257                 /*
1258                  * with a length of PAGE_SIZE, each returned stripe
1259                  * represents one mirror
1260                  */
1261                 ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
1262                                       &mapped_length, &bbio, 0);
1263                 if (ret || !bbio || mapped_length < sublen) {
1264                         kfree(bbio);
1265                         return -EIO;
1266                 }
1267
1268                 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
1269                 for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
1270                      mirror_index++) {
1271                         struct scrub_block *sblock;
1272                         struct scrub_page *page;
1273
1274                         if (mirror_index >= BTRFS_MAX_MIRRORS)
1275                                 continue;
1276
1277                         sblock = sblocks_for_recheck + mirror_index;
1278                         sblock->sctx = sctx;
1279                         page = kzalloc(sizeof(*page), GFP_NOFS);
1280                         if (!page) {
1281 leave_nomem:
1282                                 spin_lock(&sctx->stat_lock);
1283                                 sctx->stat.malloc_errors++;
1284                                 spin_unlock(&sctx->stat_lock);
1285                                 kfree(bbio);
1286                                 return -ENOMEM;
1287                         }
1288                         scrub_page_get(page);
1289                         sblock->pagev[page_index] = page;
1290                         page->logical = logical;
1291                         page->physical = bbio->stripes[mirror_index].physical;
1292                         BUG_ON(page_index >= original_sblock->page_count);
1293                         page->physical_for_dev_replace =
1294                                 original_sblock->pagev[page_index]->
1295                                 physical_for_dev_replace;
1296                         /* for missing devices, dev->bdev is NULL */
1297                         page->dev = bbio->stripes[mirror_index].dev;
1298                         page->mirror_num = mirror_index + 1;
1299                         sblock->page_count++;
1300                         page->page = alloc_page(GFP_NOFS);
1301                         if (!page->page)
1302                                 goto leave_nomem;
1303                 }
1304                 kfree(bbio);
1305                 length -= sublen;
1306                 logical += sublen;
1307                 page_index++;
1308         }
1309
1310         return 0;
1311 }
1312
1313 /*
1314  * this function will check the on disk data for checksum errors, header
1315  * errors and read I/O errors. If any I/O error happens, the exact pages
1316  * that failed are marked as bad. The goal is to enable scrub to take the
1317  * good pages from all the mirrors so that the pages that failed in the
1318  * mirror just handled can be repaired.
1319  */
1320 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1321                                 struct scrub_block *sblock, int is_metadata,
1322                                 int have_csum, u8 *csum, u64 generation,
1323                                 u16 csum_size)
1324 {
1325         int page_num;
1326
1327         sblock->no_io_error_seen = 1;
1328         sblock->header_error = 0;
1329         sblock->checksum_error = 0;
1330
1331         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1332                 struct bio *bio;
1333                 struct scrub_page *page = sblock->pagev[page_num];
1334
1335                 if (page->dev->bdev == NULL) {
1336                         page->io_error = 1;
1337                         sblock->no_io_error_seen = 0;
1338                         continue;
1339                 }
1340
1341                 WARN_ON(!page->page);
1342                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1343                 if (!bio) {
1344                         page->io_error = 1;
1345                         sblock->no_io_error_seen = 0;
1346                         continue;
1347                 }
1348                 bio->bi_bdev = page->dev->bdev;
1349                 bio->bi_sector = page->physical >> 9;
1350
1351                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1352                 if (btrfsic_submit_bio_wait(READ, bio))
1353                         sblock->no_io_error_seen = 0;
1354
1355                 bio_put(bio);
1356         }
1357
1358         if (sblock->no_io_error_seen)
1359                 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1360                                              have_csum, csum, generation,
1361                                              csum_size);
1362
1363         return;
1364 }
1365
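/*
 * recompute the checksum over all pages of the block. For metadata, the
 * bytenr, fsid, chunk tree uuid and generation stored in the header are
 * verified as well and the header's own csum is used as the expected value;
 * data blocks without a known csum are not checked at all. The result is
 * recorded in header_error, generation_error and checksum_error.
 */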
1366 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1367                                          struct scrub_block *sblock,
1368                                          int is_metadata, int have_csum,
1369                                          const u8 *csum, u64 generation,
1370                                          u16 csum_size)
1371 {
1372         int page_num;
1373         u8 calculated_csum[BTRFS_CSUM_SIZE];
1374         u32 crc = ~(u32)0;
1375         void *mapped_buffer;
1376
1377         WARN_ON(!sblock->pagev[0]->page);
1378         if (is_metadata) {
1379                 struct btrfs_header *h;
1380
1381                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1382                 h = (struct btrfs_header *)mapped_buffer;
1383
1384                 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
1385                     memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
1386                     memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1387                            BTRFS_UUID_SIZE)) {
1388                         sblock->header_error = 1;
1389                 } else if (generation != btrfs_stack_header_generation(h)) {
1390                         sblock->header_error = 1;
1391                         sblock->generation_error = 1;
1392                 }
1393                 csum = h->csum;
1394         } else {
1395                 if (!have_csum)
1396                         return;
1397
1398                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1399         }
1400
1401         for (page_num = 0;;) {
1402                 if (page_num == 0 && is_metadata)
1403                         crc = btrfs_csum_data(
1404                                 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1405                                 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1406                 else
1407                         crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
1408
1409                 kunmap_atomic(mapped_buffer);
1410                 page_num++;
1411                 if (page_num >= sblock->page_count)
1412                         break;
1413                 WARN_ON(!sblock->pagev[page_num]->page);
1414
1415                 mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
1416         }
1417
1418         btrfs_csum_final(crc, calculated_csum);
1419         if (memcmp(calculated_csum, csum, csum_size))
1420                 sblock->checksum_error = 1;
1421 }
1422
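/*
 * overwrite the pages of the bad mirror with the corresponding pages of
 * the good mirror; returns non-zero if any page could not be repaired.
 */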
1423 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1424                                              struct scrub_block *sblock_good,
1425                                              int force_write)
1426 {
1427         int page_num;
1428         int ret = 0;
1429
1430         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1431                 int ret_sub;
1432
1433                 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1434                                                            sblock_good,
1435                                                            page_num,
1436                                                            force_write);
1437                 if (ret_sub)
1438                         ret = ret_sub;
1439         }
1440
1441         return ret;
1442 }
1443
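/*
 * the write to the bad mirror is only issued when it is forced or when the
 * bad block shows a header/checksum error or the page itself saw an I/O
 * error; write failures are accounted in the device statistics and in the
 * dev-replace write error counter.
 */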
1444 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1445                                             struct scrub_block *sblock_good,
1446                                             int page_num, int force_write)
1447 {
1448         struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1449         struct scrub_page *page_good = sblock_good->pagev[page_num];
1450
1451         BUG_ON(page_bad->page == NULL);
1452         BUG_ON(page_good->page == NULL);
1453         if (force_write || sblock_bad->header_error ||
1454             sblock_bad->checksum_error || page_bad->io_error) {
1455                 struct bio *bio;
1456                 int ret;
1457
1458                 if (!page_bad->dev->bdev) {
1459                         printk_ratelimited(KERN_WARNING "BTRFS: "
1460                                 "scrub_repair_page_from_good_copy(bdev == NULL) "
1461                                 "is unexpected!\n");
1462                         return -EIO;
1463                 }
1464
1465                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1466                 if (!bio)
1467                         return -EIO;
1468                 bio->bi_bdev = page_bad->dev->bdev;
1469                 bio->bi_sector = page_bad->physical >> 9;
1470
1471                 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1472                 if (PAGE_SIZE != ret) {
1473                         bio_put(bio);
1474                         return -EIO;
1475                 }
1476
1477                 if (btrfsic_submit_bio_wait(WRITE, bio)) {
1478                         btrfs_dev_stat_inc_and_print(page_bad->dev,
1479                                 BTRFS_DEV_STAT_WRITE_ERRS);
1480                         btrfs_dev_replace_stats_inc(
1481                                 &sblock_bad->sctx->dev_root->fs_info->
1482                                 dev_replace.num_write_errors);
1483                         bio_put(bio);
1484                         return -EIO;
1485                 }
1486                 bio_put(bio);
1487         }
1488
1489         return 0;
1490 }
1491
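/*
 * queue all pages of the block for writing to the dev-replace target;
 * failures are only counted in dev_replace.num_write_errors.
 */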
1492 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1493 {
1494         int page_num;
1495
1496         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1497                 int ret;
1498
1499                 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1500                 if (ret)
1501                         btrfs_dev_replace_stats_inc(
1502                                 &sblock->sctx->dev_root->fs_info->dev_replace.
1503                                 num_write_errors);
1504         }
1505 }
1506
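/*
 * pages that could not be read are zero-filled before they are queued, so
 * no stale data ends up on the dev-replace target device.
 */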
1507 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1508                                            int page_num)
1509 {
1510         struct scrub_page *spage = sblock->pagev[page_num];
1511
1512         BUG_ON(spage->page == NULL);
1513         if (spage->io_error) {
1514                 void *mapped_buffer = kmap_atomic(spage->page);
1515
1516                 memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
1517                 flush_dcache_page(spage->page);
1518                 kunmap_atomic(mapped_buffer);
1519         }
1520         return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1521 }
1522
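/*
 * append a page to the current dev-replace write bio under wr_lock. A new
 * scrub_bio is started when none is open; the current one is submitted
 * first when the page is not physically and logically contiguous with it
 * or when it already holds pages_per_wr_bio pages.
 */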
1523 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1524                                     struct scrub_page *spage)
1525 {
1526         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1527         struct scrub_bio *sbio;
1528         int ret;
1529
1530         mutex_lock(&wr_ctx->wr_lock);
1531 again:
1532         if (!wr_ctx->wr_curr_bio) {
1533                 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1534                                               GFP_NOFS);
1535                 if (!wr_ctx->wr_curr_bio) {
1536                         mutex_unlock(&wr_ctx->wr_lock);
1537                         return -ENOMEM;
1538                 }
1539                 wr_ctx->wr_curr_bio->sctx = sctx;
1540                 wr_ctx->wr_curr_bio->page_count = 0;
1541         }
1542         sbio = wr_ctx->wr_curr_bio;
1543         if (sbio->page_count == 0) {
1544                 struct bio *bio;
1545
1546                 sbio->physical = spage->physical_for_dev_replace;
1547                 sbio->logical = spage->logical;
1548                 sbio->dev = wr_ctx->tgtdev;
1549                 bio = sbio->bio;
1550                 if (!bio) {
1551                         bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
1552                         if (!bio) {
1553                                 mutex_unlock(&wr_ctx->wr_lock);
1554                                 return -ENOMEM;
1555                         }
1556                         sbio->bio = bio;
1557                 }
1558
1559                 bio->bi_private = sbio;
1560                 bio->bi_end_io = scrub_wr_bio_end_io;
1561                 bio->bi_bdev = sbio->dev->bdev;
1562                 bio->bi_sector = sbio->physical >> 9;
1563                 sbio->err = 0;
1564         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1565                    spage->physical_for_dev_replace ||
1566                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1567                    spage->logical) {
1568                 scrub_wr_submit(sctx);
1569                 goto again;
1570         }
1571
1572         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1573         if (ret != PAGE_SIZE) {
1574                 if (sbio->page_count < 1) {
1575                         bio_put(sbio->bio);
1576                         sbio->bio = NULL;
1577                         mutex_unlock(&wr_ctx->wr_lock);
1578                         return -EIO;
1579                 }
1580                 scrub_wr_submit(sctx);
1581                 goto again;
1582         }
1583
1584         sbio->pagev[sbio->page_count] = spage;
1585         scrub_page_get(spage);
1586         sbio->page_count++;
1587         if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1588                 scrub_wr_submit(sctx);
1589         mutex_unlock(&wr_ctx->wr_lock);
1590
1591         return 0;
1592 }
1593
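/*
 * hand the currently assembled write bio to the block layer and account it
 * as in flight so that the scrub can wait for its completion.
 */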
1594 static void scrub_wr_submit(struct scrub_ctx *sctx)
1595 {
1596         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1597         struct scrub_bio *sbio;
1598
1599         if (!wr_ctx->wr_curr_bio)
1600                 return;
1601
1602         sbio = wr_ctx->wr_curr_bio;
1603         wr_ctx->wr_curr_bio = NULL;
1604         WARN_ON(!sbio->bio->bi_bdev);
1605         scrub_pending_bio_inc(sctx);
1606         /* process all writes in a single worker thread. The block layer
1607          * then orders the requests before sending them to the driver,
1608          * which doubled the write performance on spinning disks when
1609          * measured with Linux 3.5 */
1610         btrfsic_submit_bio(WRITE, sbio->bio);
1611 }
1612
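/*
 * write completion runs in the scrub_wr_completion workqueue. On error the
 * worker below marks all pages of the bio as errored and bumps the
 * dev-replace write error counter before it drops the page references and
 * frees the scrub_bio.
 */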
1613 static void scrub_wr_bio_end_io(struct bio *bio, int err)
1614 {
1615         struct scrub_bio *sbio = bio->bi_private;
1616         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1617
1618         sbio->err = err;
1619         sbio->bio = bio;
1620
1621         btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
1622         btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1623 }
1624
1625 static void scrub_wr_bio_end_io_worker(struct btrfs_work_struct *work)
1626 {
1627         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1628         struct scrub_ctx *sctx = sbio->sctx;
1629         int i;
1630
1631         WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1632         if (sbio->err) {
1633                 struct btrfs_dev_replace *dev_replace =
1634                         &sbio->sctx->dev_root->fs_info->dev_replace;
1635
1636                 for (i = 0; i < sbio->page_count; i++) {
1637                         struct scrub_page *spage = sbio->pagev[i];
1638
1639                         spage->io_error = 1;
1640                         btrfs_dev_replace_stats_inc(&dev_replace->
1641                                                     num_write_errors);
1642                 }
1643         }
1644
1645         for (i = 0; i < sbio->page_count; i++)
1646                 scrub_page_put(sbio->pagev[i]);
1647
1648         bio_put(sbio->bio);
1649         kfree(sbio);
1650         scrub_pending_bio_dec(sctx);
1651 }
1652
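/*
 * dispatch to the data, tree block or super block checksum helper based on
 * the extent flags of the first page. A failed block is handed to the error
 * correction machinery; super block errors are only reported and their
 * return value is intentionally ignored.
 */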
1653 static int scrub_checksum(struct scrub_block *sblock)
1654 {
1655         u64 flags;
1656         int ret;
1657
1658         WARN_ON(sblock->page_count < 1);
1659         flags = sblock->pagev[0]->flags;
1660         ret = 0;
1661         if (flags & BTRFS_EXTENT_FLAG_DATA)
1662                 ret = scrub_checksum_data(sblock);
1663         else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1664                 ret = scrub_checksum_tree_block(sblock);
1665         else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1666                 (void)scrub_checksum_super(sblock);
1667         else
1668                 WARN_ON(1);
1669         if (ret)
1670                 scrub_handle_errored_block(sblock);
1671
1672         return ret;
1673 }
1674
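/*
 * verify a data block against the csum stored in its first page; returns 1
 * on mismatch, 0 when it matches or when no checksum is available.
 */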
1675 static int scrub_checksum_data(struct scrub_block *sblock)
1676 {
1677         struct scrub_ctx *sctx = sblock->sctx;
1678         u8 csum[BTRFS_CSUM_SIZE];
1679         u8 *on_disk_csum;
1680         struct page *page;
1681         void *buffer;
1682         u32 crc = ~(u32)0;
1683         int fail = 0;
1684         u64 len;
1685         int index;
1686
1687         BUG_ON(sblock->page_count < 1);
1688         if (!sblock->pagev[0]->have_csum)
1689                 return 0;
1690
1691         on_disk_csum = sblock->pagev[0]->csum;
1692         page = sblock->pagev[0]->page;
1693         buffer = kmap_atomic(page);
1694
1695         len = sctx->sectorsize;
1696         index = 0;
1697         for (;;) {
1698                 u64 l = min_t(u64, len, PAGE_SIZE);
1699
1700                 crc = btrfs_csum_data(buffer, crc, l);
1701                 kunmap_atomic(buffer);
1702                 len -= l;
1703                 if (len == 0)
1704                         break;
1705                 index++;
1706                 BUG_ON(index >= sblock->page_count);
1707                 BUG_ON(!sblock->pagev[index]->page);
1708                 page = sblock->pagev[index]->page;
1709                 buffer = kmap_atomic(page);
1710         }
1711
1712         btrfs_csum_final(crc, csum);
1713         if (memcmp(csum, on_disk_csum, sctx->csum_size))
1714                 fail = 1;
1715
1716         return fail;
1717 }
1718
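/*
 * verify the header fields of a tree block (bytenr, generation, fsid and
 * chunk tree uuid) and the checksum over the whole node; returns non-zero
 * if either check fails.
 */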
1719 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1720 {
1721         struct scrub_ctx *sctx = sblock->sctx;
1722         struct btrfs_header *h;
1723         struct btrfs_root *root = sctx->dev_root;
1724         struct btrfs_fs_info *fs_info = root->fs_info;
1725         u8 calculated_csum[BTRFS_CSUM_SIZE];
1726         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1727         struct page *page;
1728         void *mapped_buffer;
1729         u64 mapped_size;
1730         void *p;
1731         u32 crc = ~(u32)0;
1732         int fail = 0;
1733         int crc_fail = 0;
1734         u64 len;
1735         int index;
1736
1737         BUG_ON(sblock->page_count < 1);
1738         page = sblock->pagev[0]->page;
1739         mapped_buffer = kmap_atomic(page);
1740         h = (struct btrfs_header *)mapped_buffer;
1741         memcpy(on_disk_csum, h->csum, sctx->csum_size);
1742
1743         /*
1744          * we don't use the getter functions here, as we
1745          * a) don't have an extent buffer and
1746          * b) the page is already kmapped
1747          */
1748
1749         if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1750                 ++fail;
1751
1752         if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
1753                 ++fail;
1754
1755         if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1756                 ++fail;
1757
1758         if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1759                    BTRFS_UUID_SIZE))
1760                 ++fail;
1761
1762         WARN_ON(sctx->nodesize != sctx->leafsize);
1763         len = sctx->nodesize - BTRFS_CSUM_SIZE;
1764         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1765         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1766         index = 0;
1767         for (;;) {
1768                 u64 l = min_t(u64, len, mapped_size);
1769
1770                 crc = btrfs_csum_data(p, crc, l);
1771                 kunmap_atomic(mapped_buffer);
1772                 len -= l;
1773                 if (len == 0)
1774                         break;
1775                 index++;
1776                 BUG_ON(index >= sblock->page_count);
1777                 BUG_ON(!sblock->pagev[index]->page);
1778                 page = sblock->pagev[index]->page;
1779                 mapped_buffer = kmap_atomic(page);
1780                 mapped_size = PAGE_SIZE;
1781                 p = mapped_buffer;
1782         }
1783
1784         btrfs_csum_final(crc, calculated_csum);
1785         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1786                 ++crc_fail;
1787
1788         return fail || crc_fail;
1789 }
1790
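/*
 * verify bytenr, generation, fsid and checksum of a super block copy;
 * corruption and generation mismatches are accounted separately in the
 * device statistics.
 */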
1791 static int scrub_checksum_super(struct scrub_block *sblock)
1792 {
1793         struct btrfs_super_block *s;
1794         struct scrub_ctx *sctx = sblock->sctx;
1795         struct btrfs_root *root = sctx->dev_root;
1796         struct btrfs_fs_info *fs_info = root->fs_info;
1797         u8 calculated_csum[BTRFS_CSUM_SIZE];
1798         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1799         struct page *page;
1800         void *mapped_buffer;
1801         u64 mapped_size;
1802         void *p;
1803         u32 crc = ~(u32)0;
1804         int fail_gen = 0;
1805         int fail_cor = 0;
1806         u64 len;
1807         int index;
1808
1809         BUG_ON(sblock->page_count < 1);
1810         page = sblock->pagev[0]->page;
1811         mapped_buffer = kmap_atomic(page);
1812         s = (struct btrfs_super_block *)mapped_buffer;
1813         memcpy(on_disk_csum, s->csum, sctx->csum_size);
1814
1815         if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
1816                 ++fail_cor;
1817
1818         if (sblock->pagev[0]->generation != btrfs_super_generation(s))
1819                 ++fail_gen;
1820
1821         if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1822                 ++fail_cor;
1823
1824         len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1825         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1826         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1827         index = 0;
1828         for (;;) {
1829                 u64 l = min_t(u64, len, mapped_size);
1830
1831                 crc = btrfs_csum_data(p, crc, l);
1832                 kunmap_atomic(mapped_buffer);
1833                 len -= l;
1834                 if (len == 0)
1835                         break;
1836                 index++;
1837                 BUG_ON(index >= sblock->page_count);
1838                 BUG_ON(!sblock->pagev[index]->page);
1839                 page = sblock->pagev[index]->page;
1840                 mapped_buffer = kmap_atomic(page);
1841                 mapped_size = PAGE_SIZE;
1842                 p = mapped_buffer;
1843         }
1844
1845         btrfs_csum_final(crc, calculated_csum);
1846         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1847                 ++fail_cor;
1848
1849         if (fail_cor + fail_gen) {
1850                 /*
1851                  * if we find an error in a super block, we just report it.
1852                  * The super blocks get rewritten with the next transaction
1853                  * commit anyway
1854                  */
1855                 spin_lock(&sctx->stat_lock);
1856                 ++sctx->stat.super_errors;
1857                 spin_unlock(&sctx->stat_lock);
1858                 if (fail_cor)
1859                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1860                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1861                 else
1862                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1863                                 BTRFS_DEV_STAT_GENERATION_ERRS);
1864         }
1865
1866         return fail_cor + fail_gen;
1867 }
1868
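/*
 * simple reference counting for scrub_block and scrub_page; dropping the
 * last reference on a block also drops the references on all its pages.
 */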
1869 static void scrub_block_get(struct scrub_block *sblock)
1870 {
1871         atomic_inc(&sblock->ref_count);
1872 }
1873
1874 static void scrub_block_put(struct scrub_block *sblock)
1875 {
1876         if (atomic_dec_and_test(&sblock->ref_count)) {
1877                 int i;
1878
1879                 for (i = 0; i < sblock->page_count; i++)
1880                         scrub_page_put(sblock->pagev[i]);
1881                 kfree(sblock);
1882         }
1883 }
1884
1885 static void scrub_page_get(struct scrub_page *spage)
1886 {
1887         atomic_inc(&spage->ref_count);
1888 }
1889
1890 static void scrub_page_put(struct scrub_page *spage)
1891 {
1892         if (atomic_dec_and_test(&spage->ref_count)) {
1893                 if (spage->page)
1894                         __free_page(spage->page);
1895                 kfree(spage);
1896         }
1897 }
1898
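/*
 * submit the currently assembled read bio, if any. A bio without a bdev
 * (missing device) is completed immediately with -EIO instead.
 */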
1899 static void scrub_submit(struct scrub_ctx *sctx)
1900 {
1901         struct scrub_bio *sbio;
1902
1903         if (sctx->curr == -1)
1904                 return;
1905
1906         sbio = sctx->bios[sctx->curr];
1907         sctx->curr = -1;
1908         scrub_pending_bio_inc(sctx);
1909
1910         if (!sbio->bio->bi_bdev) {
1911                 /*
1912                  * this case should not happen. If btrfs_map_block() is
1913                  * wrong, it could happen for dev-replace operations on
1914                  * missing devices when no mirrors are available, but in
1915                  * that case the mount should already have failed.
1916                  * This case is handled correctly (but _very_ slowly).
1917                  */
1918                 printk_ratelimited(KERN_WARNING
1919                         "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
1920                 bio_endio(sbio->bio, -EIO);
1921         } else {
1922                 btrfsic_submit_bio(READ, sbio->bio);
1923         }
1924 }
1925
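/*
 * append a page to the current read bio, waiting for a free scrub_bio if
 * all of them are in flight. The current bio is submitted first when the
 * page is not contiguous with it, belongs to a different device or when
 * the bio is full.
 */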
1926 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
1927                                     struct scrub_page *spage)
1928 {
1929         struct scrub_block *sblock = spage->sblock;
1930         struct scrub_bio *sbio;
1931         int ret;
1932
1933 again:
1934         /*
1935          * grab a fresh bio or wait for one to become available
1936          */
1937         while (sctx->curr == -1) {
1938                 spin_lock(&sctx->list_lock);
1939                 sctx->curr = sctx->first_free;
1940                 if (sctx->curr != -1) {
1941                         sctx->first_free = sctx->bios[sctx->curr]->next_free;
1942                         sctx->bios[sctx->curr]->next_free = -1;
1943                         sctx->bios[sctx->curr]->page_count = 0;
1944                         spin_unlock(&sctx->list_lock);
1945                 } else {
1946                         spin_unlock(&sctx->list_lock);
1947                         wait_event(sctx->list_wait, sctx->first_free != -1);
1948                 }
1949         }
1950         sbio = sctx->bios[sctx->curr];
1951         if (sbio->page_count == 0) {
1952                 struct bio *bio;
1953
1954                 sbio->physical = spage->physical;
1955                 sbio->logical = spage->logical;
1956                 sbio->dev = spage->dev;
1957                 bio = sbio->bio;
1958                 if (!bio) {
1959                         bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
1960                         if (!bio)
1961                                 return -ENOMEM;
1962                         sbio->bio = bio;
1963                 }
1964
1965                 bio->bi_private = sbio;
1966                 bio->bi_end_io = scrub_bio_end_io;
1967                 bio->bi_bdev = sbio->dev->bdev;
1968                 bio->bi_sector = sbio->physical >> 9;
1969                 sbio->err = 0;
1970         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1971                    spage->physical ||
1972                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1973                    spage->logical ||
1974                    sbio->dev != spage->dev) {
1975                 scrub_submit(sctx);
1976                 goto again;
1977         }
1978
1979         sbio->pagev[sbio->page_count] = spage;
1980         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1981         if (ret != PAGE_SIZE) {
1982                 if (sbio->page_count < 1) {
1983                         bio_put(sbio->bio);
1984                         sbio->bio = NULL;
1985                         return -EIO;
1986                 }
1987                 scrub_submit(sctx);
1988                 goto again;
1989         }
1990
1991         scrub_block_get(sblock); /* one for the page added to the bio */
1992         atomic_inc(&sblock->outstanding_pages);
1993         sbio->page_count++;
1994         if (sbio->page_count == sctx->pages_per_rd_bio)
1995                 scrub_submit(sctx);
1996
1997         return 0;
1998 }
1999
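/*
 * split the [logical, logical + len) range into PAGE_SIZE scrub_pages,
 * bundle them into one scrub_block and queue every page for reading; the
 * block is freed by whoever drops the last reference.
 */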
2000 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2001                        u64 physical, struct btrfs_device *dev, u64 flags,
2002                        u64 gen, int mirror_num, u8 *csum, int force,
2003                        u64 physical_for_dev_replace)
2004 {
2005         struct scrub_block *sblock;
2006         int index;
2007
2008         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2009         if (!sblock) {
2010                 spin_lock(&sctx->stat_lock);
2011                 sctx->stat.malloc_errors++;
2012                 spin_unlock(&sctx->stat_lock);
2013                 return -ENOMEM;
2014         }
2015
2016         /* one ref inside this function, plus one for each page added to
2017          * a bio later on */
2018         atomic_set(&sblock->ref_count, 1);
2019         sblock->sctx = sctx;
2020         sblock->no_io_error_seen = 1;
2021
2022         for (index = 0; len > 0; index++) {
2023                 struct scrub_page *spage;
2024                 u64 l = min_t(u64, len, PAGE_SIZE);
2025
2026                 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2027                 if (!spage) {
2028 leave_nomem:
2029                         spin_lock(&sctx->stat_lock);
2030                         sctx->stat.malloc_errors++;
2031                         spin_unlock(&sctx->stat_lock);
2032                         scrub_block_put(sblock);
2033                         return -ENOMEM;
2034                 }
2035                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2036                 scrub_page_get(spage);
2037                 sblock->pagev[index] = spage;
2038                 spage->sblock = sblock;
2039                 spage->dev = dev;
2040                 spage->flags = flags;
2041                 spage->generation = gen;
2042                 spage->logical = logical;
2043                 spage->physical = physical;
2044                 spage->physical_for_dev_replace = physical_for_dev_replace;
2045                 spage->mirror_num = mirror_num;
2046                 if (csum) {
2047                         spage->have_csum = 1;
2048                         memcpy(spage->csum, csum, sctx->csum_size);
2049                 } else {
2050                         spage->have_csum = 0;
2051                 }
2052                 sblock->page_count++;
2053                 spage->page = alloc_page(GFP_NOFS);
2054                 if (!spage->page)
2055                         goto leave_nomem;
2056                 len -= l;
2057                 logical += l;
2058                 physical += l;
2059                 physical_for_dev_replace += l;
2060         }
2061
2062         WARN_ON(sblock->page_count == 0);
2063         for (index = 0; index < sblock->page_count; index++) {
2064                 struct scrub_page *spage = sblock->pagev[index];
2065                 int ret;
2066
2067                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2068                 if (ret) {
2069                         scrub_block_put(sblock);
2070                         return ret;
2071                 }
2072         }
2073
2074         if (force)
2075                 scrub_submit(sctx);
2076
2077         /* last one frees, either here or in bio completion for last page */
2078         scrub_block_put(sblock);
2079         return 0;
2080 }
2081
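/*
 * read completion is deferred to the scrub_workers workqueue. The worker
 * below marks errored pages, completes the blocks whose last outstanding
 * page just finished and puts the scrub_bio back on the free list.
 */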
2082 static void scrub_bio_end_io(struct bio *bio, int err)
2083 {
2084         struct scrub_bio *sbio = bio->bi_private;
2085         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
2086
2087         sbio->err = err;
2088         sbio->bio = bio;
2089
2090         btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2091 }
2092
2093 static void scrub_bio_end_io_worker(struct btrfs_work_struct *work)
2094 {
2095         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2096         struct scrub_ctx *sctx = sbio->sctx;
2097         int i;
2098
2099         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2100         if (sbio->err) {
2101                 for (i = 0; i < sbio->page_count; i++) {
2102                         struct scrub_page *spage = sbio->pagev[i];
2103
2104                         spage->io_error = 1;
2105                         spage->sblock->no_io_error_seen = 0;
2106                 }
2107         }
2108
2109         /* now complete the scrub_block items that have all pages completed */
2110         for (i = 0; i < sbio->page_count; i++) {
2111                 struct scrub_page *spage = sbio->pagev[i];
2112                 struct scrub_block *sblock = spage->sblock;
2113
2114                 if (atomic_dec_and_test(&sblock->outstanding_pages))
2115                         scrub_block_complete(sblock);
2116                 scrub_block_put(sblock);
2117         }
2118
2119         bio_put(sbio->bio);
2120         sbio->bio = NULL;
2121         spin_lock(&sctx->list_lock);
2122         sbio->next_free = sctx->first_free;
2123         sctx->first_free = sbio->index;
2124         spin_unlock(&sctx->list_lock);
2125
2126         if (sctx->is_dev_replace &&
2127             atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2128                 mutex_lock(&sctx->wr_ctx.wr_lock);
2129                 scrub_wr_submit(sctx);
2130                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2131         }
2132
2133         scrub_pending_bio_dec(sctx);
2134 }
2135
2136 static void scrub_block_complete(struct scrub_block *sblock)
2137 {
2138         if (!sblock->no_io_error_seen) {
2139                 scrub_handle_errored_block(sblock);
2140         } else {
2141                 /*
2142                  * if the block has a checksum error, it is rewritten via
2143                  * the repair mechanism in the dev replace case; otherwise
2144                  * write it out here in the dev replace case.
2145                  */
2146                 if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace)
2147                         scrub_write_block_to_dev_replace(sblock);
2148         }
2149 }
2150
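/*
 * look up the checksum for the sector at @logical in the csum_list (kept
 * in bytenr order), discarding sums that lie entirely before it; returns 1
 * and copies the checksum into @csum if one was found, 0 otherwise.
 */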
2151 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
2152                            u8 *csum)
2153 {
2154         struct btrfs_ordered_sum *sum = NULL;
2155         unsigned long index;
2156         unsigned long num_sectors;
2157
2158         while (!list_empty(&sctx->csum_list)) {
2159                 sum = list_first_entry(&sctx->csum_list,
2160                                        struct btrfs_ordered_sum, list);
2161                 if (sum->bytenr > logical)
2162                         return 0;
2163                 if (sum->bytenr + sum->len > logical)
2164                         break;
2165
2166                 ++sctx->stat.csum_discards;
2167                 list_del(&sum->list);
2168                 kfree(sum);
2169                 sum = NULL;
2170         }
2171         if (!sum)
2172                 return 0;
2173
2174         index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
2175         num_sectors = sum->len / sctx->sectorsize;
2176         memcpy(csum, sum->sums + index, sctx->csum_size);
2177         if (index == num_sectors - 1) {
2178                 list_del(&sum->list);
2179                 kfree(sum);
2180         }
2181         return 1;
2182 }
2183
2184 /* scrub extent tries to collect up to 64 kB for each bio */
2185 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2186                         u64 physical, struct btrfs_device *dev, u64 flags,
2187                         u64 gen, int mirror_num, u64 physical_for_dev_replace)
2188 {
2189         int ret;
2190         u8 csum[BTRFS_CSUM_SIZE];
2191         u32 blocksize;
2192
2193         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2194                 blocksize = sctx->sectorsize;
2195                 spin_lock(&sctx->stat_lock);
2196                 sctx->stat.data_extents_scrubbed++;
2197                 sctx->stat.data_bytes_scrubbed += len;
2198                 spin_unlock(&sctx->stat_lock);
2199         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2200                 WARN_ON(sctx->nodesize != sctx->leafsize);
2201                 blocksize = sctx->nodesize;
2202                 spin_lock(&sctx->stat_lock);
2203                 sctx->stat.tree_extents_scrubbed++;
2204                 sctx->stat.tree_bytes_scrubbed += len;
2205                 spin_unlock(&sctx->stat_lock);
2206         } else {
2207                 blocksize = sctx->sectorsize;
2208                 WARN_ON(1);
2209         }
2210
2211         while (len) {
2212                 u64 l = min_t(u64, len, blocksize);
2213                 int have_csum = 0;
2214
2215                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2216                         /* push csums to sbio */
2217                         have_csum = scrub_find_csum(sctx, logical, l, csum);
2218                         if (have_csum == 0)
2219                                 ++sctx->stat.no_csum;
2220                         if (sctx->is_dev_replace && !have_csum) {
2221                                 ret = copy_nocow_pages(sctx, logical, l,
2222                                                        mirror_num,
2223                                                       physical_for_dev_replace);
2224                                 goto behind_scrub_pages;
2225                         }
2226                 }
2227                 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2228                                   mirror_num, have_csum ? csum : NULL, 0,
2229                                   physical_for_dev_replace);
2230 behind_scrub_pages:
2231                 if (ret)
2232                         return ret;
2233                 len -= l;
2234                 logical += l;
2235                 physical += l;
2236                 physical_for_dev_replace += l;
2237         }
2238         return 0;
2239 }
2240
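/*
 * scrub one stripe of a chunk on the given device: derive the stripe
 * offset and per-iteration increment from the raid profile, read ahead the
 * extent and csum trees, then walk all extent items that fall into each
 * stripe and feed them to scrub_extent(); cancel and pause requests are
 * honoured between stripes.
 */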
2241 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2242                                            struct map_lookup *map,
2243                                            struct btrfs_device *scrub_dev,
2244                                            int num, u64 base, u64 length,
2245                                            int is_dev_replace)
2246 {
2247         struct btrfs_path *path;
2248         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2249         struct btrfs_root *root = fs_info->extent_root;
2250         struct btrfs_root *csum_root = fs_info->csum_root;
2251         struct btrfs_extent_item *extent;
2252         struct blk_plug plug;
2253         u64 flags;
2254         int ret;
2255         int slot;
2256         u64 nstripes;
2257         struct extent_buffer *l;
2258         struct btrfs_key key;
2259         u64 physical;
2260         u64 logical;
2261         u64 logic_end;
2262         u64 generation;
2263         int mirror_num;
2264         struct reada_control *reada1;
2265         struct reada_control *reada2;
2266         struct btrfs_key key_start;
2267         struct btrfs_key key_end;
2268         u64 increment = map->stripe_len;
2269         u64 offset;
2270         u64 extent_logical;
2271         u64 extent_physical;
2272         u64 extent_len;
2273         struct btrfs_device *extent_dev;
2274         int extent_mirror_num;
2275         int stop_loop;
2276
2277         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2278                          BTRFS_BLOCK_GROUP_RAID6)) {
2279                 if (num >= nr_data_stripes(map)) {
2280                         return 0;
2281                 }
2282         }
2283
2284         nstripes = length;
2285         offset = 0;
2286         do_div(nstripes, map->stripe_len);
2287         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
2288                 offset = map->stripe_len * num;
2289                 increment = map->stripe_len * map->num_stripes;
2290                 mirror_num = 1;
2291         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2292                 int factor = map->num_stripes / map->sub_stripes;
2293                 offset = map->stripe_len * (num / map->sub_stripes);
2294                 increment = map->stripe_len * factor;
2295                 mirror_num = num % map->sub_stripes + 1;
2296         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
2297                 increment = map->stripe_len;
2298                 mirror_num = num % map->num_stripes + 1;
2299         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2300                 increment = map->stripe_len;
2301                 mirror_num = num % map->num_stripes + 1;
2302         } else {
2303                 increment = map->stripe_len;
2304                 mirror_num = 1;
2305         }
2306
2307         path = btrfs_alloc_path();
2308         if (!path)
2309                 return -ENOMEM;
2310
2311         /*
2312          * work on commit root. The related disk blocks are static as
2313          * long as COW is applied. This means it is safe to rewrite
2314          * them to repair disk errors without any race conditions
2315          */
2316         path->search_commit_root = 1;
2317         path->skip_locking = 1;
2318
2319         /*
2320          * trigger the readahead for the extent tree and the csum tree and
2321          * wait for completion. During readahead, the scrub is officially
2322          * paused to not hold off transaction commits
2323          */
2324         logical = base + offset;
2325
2326         wait_event(sctx->list_wait,
2327                    atomic_read(&sctx->bios_in_flight) == 0);
2328         scrub_blocked_if_needed(fs_info);
2329
2330         /* FIXME it might be better to start readahead at commit root */
2331         key_start.objectid = logical;
2332         key_start.type = BTRFS_EXTENT_ITEM_KEY;
2333         key_start.offset = (u64)0;
2334         key_end.objectid = base + offset + nstripes * increment;
2335         key_end.type = BTRFS_METADATA_ITEM_KEY;
2336         key_end.offset = (u64)-1;
2337         reada1 = btrfs_reada_add(root, &key_start, &key_end);
2338
2339         key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
2340         key_start.type = BTRFS_EXTENT_CSUM_KEY;
2341         key_start.offset = logical;
2342         key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
2343         key_end.type = BTRFS_EXTENT_CSUM_KEY;
2344         key_end.offset = base + offset + nstripes * increment;
2345         reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
2346
2347         if (!IS_ERR(reada1))
2348                 btrfs_reada_wait(reada1);
2349         if (!IS_ERR(reada2))
2350                 btrfs_reada_wait(reada2);
2351
2352
2353         /*
2354          * collect all data csums for the stripe to avoid seeking during
2355          * the scrub. This might currently (crc32) end up being about 1MB
2356          */
2357         blk_start_plug(&plug);
2358
2359         /*
2360          * now find all extents for each stripe and scrub them
2361          */
2362         logical = base + offset;
2363         physical = map->stripes[num].physical;
2364         logic_end = logical + increment * nstripes;
2365         ret = 0;
2366         while (logical < logic_end) {
2367                 /*
2368                  * canceled?
2369                  */
2370                 if (atomic_read(&fs_info->scrub_cancel_req) ||
2371                     atomic_read(&sctx->cancel_req)) {
2372                         ret = -ECANCELED;
2373                         goto out;
2374                 }
2375                 /*
2376                  * check to see if we have to pause
2377                  */
2378                 if (atomic_read(&fs_info->scrub_pause_req)) {
2379                         /* push queued extents */
2380                         atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
2381                         scrub_submit(sctx);
2382                         mutex_lock(&sctx->wr_ctx.wr_lock);
2383                         scrub_wr_submit(sctx);
2384                         mutex_unlock(&sctx->wr_ctx.wr_lock);
2385                         wait_event(sctx->list_wait,
2386                                    atomic_read(&sctx->bios_in_flight) == 0);
2387                         atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
2388                         scrub_blocked_if_needed(fs_info);
2389                 }
2390
2391                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2392                         key.type = BTRFS_METADATA_ITEM_KEY;
2393                 else
2394                         key.type = BTRFS_EXTENT_ITEM_KEY;
2395                 key.objectid = logical;
2396                 key.offset = (u64)-1;
2397
2398                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2399                 if (ret < 0)
2400                         goto out;
2401
2402                 if (ret > 0) {
2403                         ret = btrfs_previous_extent_item(root, path, 0);
2404                         if (ret < 0)
2405                                 goto out;
2406                         if (ret > 0) {
2407                                 /* there's no smaller item, so stick with the
2408                                  * larger one */
2409                                 btrfs_release_path(path);
2410                                 ret = btrfs_search_slot(NULL, root, &key,
2411                                                         path, 0, 0);
2412                                 if (ret < 0)
2413                                         goto out;
2414                         }
2415                 }
2416
2417                 stop_loop = 0;
2418                 while (1) {
2419                         u64 bytes;
2420
2421                         l = path->nodes[0];
2422                         slot = path->slots[0];
2423                         if (slot >= btrfs_header_nritems(l)) {
2424                                 ret = btrfs_next_leaf(root, path);
2425                                 if (ret == 0)
2426                                         continue;
2427                                 if (ret < 0)
2428                                         goto out;
2429
2430                                 stop_loop = 1;
2431                                 break;
2432                         }
2433                         btrfs_item_key_to_cpu(l, &key, slot);
2434
2435                         if (key.type == BTRFS_METADATA_ITEM_KEY)
2436                                 bytes = root->leafsize;
2437                         else
2438                                 bytes = key.offset;
2439
2440                         if (key.objectid + bytes <= logical)
2441                                 goto next;
2442
2443                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2444                             key.type != BTRFS_METADATA_ITEM_KEY)
2445                                 goto next;
2446
2447                         if (key.objectid >= logical + map->stripe_len) {
2448                                 /* out of this device extent */
2449                                 if (key.objectid >= logic_end)
2450                                         stop_loop = 1;
2451                                 break;
2452                         }
2453
2454                         extent = btrfs_item_ptr(l, slot,
2455                                                 struct btrfs_extent_item);
2456                         flags = btrfs_extent_flags(l, extent);
2457                         generation = btrfs_extent_generation(l, extent);
2458
2459                         if (key.objectid < logical &&
2460                             (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
2461                                 btrfs_err(fs_info,
2462                                            "scrub: tree block %llu spanning "
2463                                            "stripes, ignored. logical=%llu",
2464                                        key.objectid, logical);
2465                                 goto next;
2466                         }
2467
2468 again:
2469                         extent_logical = key.objectid;
2470                         extent_len = bytes;
2471
2472                         /*
2473                          * trim extent to this stripe
2474                          */
2475                         if (extent_logical < logical) {
2476                                 extent_len -= logical - extent_logical;
2477                                 extent_logical = logical;
2478                         }
2479                         if (extent_logical + extent_len >
2480                             logical + map->stripe_len) {
2481                                 extent_len = logical + map->stripe_len -
2482                                              extent_logical;
2483                         }
2484
2485                         extent_physical = extent_logical - logical + physical;
2486                         extent_dev = scrub_dev;
2487                         extent_mirror_num = mirror_num;
2488                         if (is_dev_replace)
2489                                 scrub_remap_extent(fs_info, extent_logical,
2490                                                    extent_len, &extent_physical,
2491                                                    &extent_dev,
2492                                                    &extent_mirror_num);
2493
2494                         ret = btrfs_lookup_csums_range(csum_root, logical,
2495                                                 logical + map->stripe_len - 1,
2496                                                 &sctx->csum_list, 1);
2497                         if (ret)
2498                                 goto out;
2499
2500                         ret = scrub_extent(sctx, extent_logical, extent_len,
2501                                            extent_physical, extent_dev, flags,
2502                                            generation, extent_mirror_num,
2503                                            extent_logical - logical + physical);
2504                         if (ret)
2505                                 goto out;
2506
2507                         scrub_free_csums(sctx);
2508                         if (extent_logical + extent_len <
2509                             key.objectid + bytes) {
2510                                 logical += increment;
2511                                 physical += map->stripe_len;
2512
2513                                 if (logical < key.objectid + bytes) {
2514                                         cond_resched();
2515                                         goto again;
2516                                 }
2517
2518                                 if (logical >= logic_end) {
2519                                         stop_loop = 1;
2520                                         break;
2521                                 }
2522                         }
2523 next:
2524                         path->slots[0]++;
2525                 }
2526                 btrfs_release_path(path);
2527                 logical += increment;
2528                 physical += map->stripe_len;
2529                 spin_lock(&sctx->stat_lock);
2530                 if (stop_loop)
2531                         sctx->stat.last_physical = map->stripes[num].physical +
2532                                                    length;
2533                 else
2534                         sctx->stat.last_physical = physical;
2535                 spin_unlock(&sctx->stat_lock);
2536                 if (stop_loop)
2537                         break;
2538         }
2539 out:
2540         /* push queued extents */
2541         scrub_submit(sctx);
2542         mutex_lock(&sctx->wr_ctx.wr_lock);
2543         scrub_wr_submit(sctx);
2544         mutex_unlock(&sctx->wr_ctx.wr_lock);
2545
2546         blk_finish_plug(&plug);
2547         btrfs_free_path(path);
2548         return ret < 0 ? ret : 0;
2549 }
2550
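/*
 * look up the chunk mapping for @chunk_offset and scrub every stripe of it
 * that lives on @scrub_dev at @dev_offset.
 */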
2551 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2552                                           struct btrfs_device *scrub_dev,
2553                                           u64 chunk_tree, u64 chunk_objectid,
2554                                           u64 chunk_offset, u64 length,
2555                                           u64 dev_offset, int is_dev_replace)
2556 {
2557         struct btrfs_mapping_tree *map_tree =
2558                 &sctx->dev_root->fs_info->mapping_tree;
2559         struct map_lookup *map;
2560         struct extent_map *em;
2561         int i;
2562         int ret = 0;
2563
2564         read_lock(&map_tree->map_tree.lock);
2565         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2566         read_unlock(&map_tree->map_tree.lock);
2567
2568         if (!em)
2569                 return -EINVAL;
2570
2571         map = (struct map_lookup *)em->bdev;
2572         if (em->start != chunk_offset)
2573                 goto out;
2574
2575         if (em->len < length)
2576                 goto out;
2577
2578         for (i = 0; i < map->num_stripes; ++i) {
2579                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2580                     map->stripes[i].physical == dev_offset) {
2581                         ret = scrub_stripe(sctx, map, scrub_dev, i,
2582                                            chunk_offset, length,
2583                                            is_dev_replace);
2584                         if (ret)
2585                                 goto out;
2586                 }
2587         }
2588 out:
2589         free_extent_map(em);
2590
2591         return ret;
2592 }
2593
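/*
 * walk the DEV_EXTENT items of @scrub_dev in the commit root and scrub the
 * chunk behind every device extent that overlaps [start, end); after each
 * chunk all outstanding read and write bios are flushed and the scrub
 * counts itself as paused so transaction commits are not held off.
 */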
2594 static noinline_for_stack
2595 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2596                            struct btrfs_device *scrub_dev, u64 start, u64 end,
2597                            int is_dev_replace)
2598 {
2599         struct btrfs_dev_extent *dev_extent = NULL;
2600         struct btrfs_path *path;
2601         struct btrfs_root *root = sctx->dev_root;
2602         struct btrfs_fs_info *fs_info = root->fs_info;
2603         u64 length;
2604         u64 chunk_tree;
2605         u64 chunk_objectid;
2606         u64 chunk_offset;
2607         int ret;
2608         int slot;
2609         struct extent_buffer *l;
2610         struct btrfs_key key;
2611         struct btrfs_key found_key;
2612         struct btrfs_block_group_cache *cache;
2613         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2614
2615         path = btrfs_alloc_path();
2616         if (!path)
2617                 return -ENOMEM;
2618
2619         path->reada = 2;
2620         path->search_commit_root = 1;
2621         path->skip_locking = 1;
2622
2623         key.objectid = scrub_dev->devid;
2624         key.offset = 0ull;
2625         key.type = BTRFS_DEV_EXTENT_KEY;
2626
2627         while (1) {
2628                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2629                 if (ret < 0)
2630                         break;
2631                 if (ret > 0) {
2632                         if (path->slots[0] >=
2633                             btrfs_header_nritems(path->nodes[0])) {
2634                                 ret = btrfs_next_leaf(root, path);
2635                                 if (ret)
2636                                         break;
2637                         }
2638                 }
2639
2640                 l = path->nodes[0];
2641                 slot = path->slots[0];
2642
2643                 btrfs_item_key_to_cpu(l, &found_key, slot);
2644
2645                 if (found_key.objectid != scrub_dev->devid)
2646                         break;
2647
2648                 if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
2649                         break;
2650
2651                 if (found_key.offset >= end)
2652                         break;
2653
2654                 if (found_key.offset < key.offset)
2655                         break;
2656
2657                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2658                 length = btrfs_dev_extent_length(l, dev_extent);
2659
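                /* skip dev extents that end at or before @start */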
2660                 if (found_key.offset + length <= start) {
2661                         key.offset = found_key.offset + length;
2662                         btrfs_release_path(path);
2663                         continue;
2664                 }
2665
2666                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2667                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2668                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2669
2670                 /*
2671                  * get a reference on the corresponding block group to prevent
2672                  * the chunk from going away while we scrub it
2673                  */
2674                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2675                 if (!cache) {
2676                         ret = -ENOENT;
2677                         break;
2678                 }
2679                 dev_replace->cursor_right = found_key.offset + length;
2680                 dev_replace->cursor_left = found_key.offset;
2681                 dev_replace->item_needs_writeback = 1;
2682                 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
2683                                   chunk_offset, length, found_key.offset,
2684                                   is_dev_replace);
2685
2686                 /*
2687                  * flush and submit all pending read and write bios,
2688                  * afterwards wait for them to complete.
2689                  * Note that in the dev replace case, a read request
2690                  * causes write requests that are submitted in the read
2691                  * completion worker. Therefore all write requests must
2692                  * be flushed as well, so that all read and write
2693                  * requests have really completed by the time
2694                  * bios_in_flight drops to 0.
2695                  */
2696                 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
2697                 scrub_submit(sctx);
2698                 mutex_lock(&sctx->wr_ctx.wr_lock);
2699                 scrub_wr_submit(sctx);
2700                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2701
2702                 wait_event(sctx->list_wait,
2703                            atomic_read(&sctx->bios_in_flight) == 0);
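                /*
                 * report this scrub as paused so that a pending scrub
                 * pause request (e.g. from a transaction commit) can
                 * make progress
                 */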
2704                 atomic_inc(&fs_info->scrubs_paused);
2705                 wake_up(&fs_info->scrub_pause_wait);
2706
2707                 /*
2708                  * wait for the pending workers before we decrease
2709                  * @scrub_paused, so that a transaction commit is not
2710                  * blocked while we wait for them to finish.
2711                  */
2712                 wait_event(sctx->list_wait,
2713                            atomic_read(&sctx->workers_pending) == 0);
2714                 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
2715
2716                 mutex_lock(&fs_info->scrub_lock);
2717                 __scrub_blocked_if_needed(fs_info);
2718                 atomic_dec(&fs_info->scrubs_paused);
2719                 mutex_unlock(&fs_info->scrub_lock);
2720                 wake_up(&fs_info->scrub_pause_wait);
2721
2722                 btrfs_put_block_group(cache);
2723                 if (ret)
2724                         break;
2725                 if (is_dev_replace &&
2726                     atomic64_read(&dev_replace->num_write_errors) > 0) {
2727                         ret = -EIO;
2728                         break;
2729                 }
2730                 if (sctx->stat.malloc_errors > 0) {
2731                         ret = -ENOMEM;
2732                         break;
2733                 }
2734
2735                 dev_replace->cursor_left = dev_replace->cursor_right;
2736                 dev_replace->item_needs_writeback = 1;
2737
2738                 key.offset = found_key.offset + length;
2739                 btrfs_release_path(path);
2740         }
2741
2742         btrfs_free_path(path);
2743
2744         /*
2745          * ret can still be 1 from search_slot or next_leaf,
2746          * that's not an error
2747          */
2748         return ret < 0 ? ret : 0;
2749 }
2750
2751 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2752                                            struct btrfs_device *scrub_dev)
2753 {
2754         int     i;
2755         u64     bytenr;
2756         u64     gen;
2757         int     ret;
2758         struct btrfs_root *root = sctx->dev_root;
2759
2760         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
2761                 return -EIO;
2762
2763         gen = root->fs_info->last_trans_committed;
2764
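        /* scrub each super block mirror that fits within the device */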
2765         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2766                 bytenr = btrfs_sb_offset(i);
2767                 if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
2768                         break;
2769
2770                 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
2771                                   scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
2772                                   NULL, 1, bytenr);
2773                 if (ret)
2774                         return ret;
2775         }
2776         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
2777
2778         return 0;
2779 }
2780
2781 /*
2782  * get a reference on fs_info->scrub_workers; start the workers if necessary
2783  */
2784 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
2785                                                 int is_dev_replace)
2786 {
2787         int ret = 0;
2788         int flags = WQ_FREEZABLE | WQ_UNBOUND;
2789         int max_active = fs_info->thread_pool_size;
2790
2791         if (fs_info->scrub_workers_refcnt == 0) {
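                /*
                 * dev replace uses a single scrub worker; a regular scrub
                 * scales with the configured thread pool size
                 */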
2792                 if (is_dev_replace)
2793                         fs_info->scrub_workers =
2794                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
2795                                                       1, 4);
2796                 else
2797                         fs_info->scrub_workers =
2798                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
2799                                                       max_active, 4);
2800                 if (!fs_info->scrub_workers) {
2801                         ret = -ENOMEM;
2802                         goto out;
2803                 }
2804                 fs_info->scrub_wr_completion_workers =
2805                         btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
2806                                               max_active, 2);
2807                 if (!fs_info->scrub_wr_completion_workers) {
2808                         ret = -ENOMEM;
2809                         goto out;
2810                 }
2811                 fs_info->scrub_nocow_workers =
2812                         btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
2813                 if (!fs_info->scrub_nocow_workers) {
2814                         ret = -ENOMEM;
2815                         goto out;
2816                 }
2817         }
2818         ++fs_info->scrub_workers_refcnt;
2819 out:
2820         return ret;
2821 }
2822
2823 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
2824 {
2825         if (--fs_info->scrub_workers_refcnt == 0) {
2826                 btrfs_destroy_workqueue(fs_info->scrub_workers);
2827                 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
2828                 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
2829         }
2830         WARN_ON(fs_info->scrub_workers_refcnt < 0);
2831 }
2832
2833 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2834                     u64 end, struct btrfs_scrub_progress *progress,
2835                     int readonly, int is_dev_replace)
2836 {
2837         struct scrub_ctx *sctx;
2838         int ret;
2839         struct btrfs_device *dev;
2840
2841         if (btrfs_fs_closing(fs_info))
2842                 return -EINVAL;
2843
2844         /*
2845          * check some assumptions
2846          */
2847         if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
2848                 btrfs_err(fs_info,
2849                            "scrub: size assumption nodesize == leafsize (%d == %d) fails",
2850                        fs_info->chunk_root->nodesize,
2851                        fs_info->chunk_root->leafsize);
2852                 return -EINVAL;
2853         }
2854
2855         if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
2856                 /*
2857                  * the way scrub is implemented, it cannot calculate the
2858                  * checksum of a node larger than BTRFS_STRIPE_LEN. Do not
2859                  * handle this situation at all because it won't ever happen.
2860                  */
2861                 btrfs_err(fs_info,
2862                            "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
2863                        fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
2864                 return -EINVAL;
2865         }
2866
2867         if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
2868                 /* not supported for data w/o checksums */
2869                 btrfs_err(fs_info,
2870                            "scrub: size assumption sectorsize != PAGE_SIZE "
2871                            "(%d != %lu) fails",
2872                        fs_info->chunk_root->sectorsize, PAGE_SIZE);
2873                 return -EINVAL;
2874         }
2875
2876         if (fs_info->chunk_root->nodesize >
2877             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
2878             fs_info->chunk_root->sectorsize >
2879             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
2880                 /*
2881                  * this would exhaust the array bounds of the pagev member
2882                  * in struct scrub_block
2883                  */
2884                 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
2885                            "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
2886                        fs_info->chunk_root->nodesize,
2887                        SCRUB_MAX_PAGES_PER_BLOCK,
2888                        fs_info->chunk_root->sectorsize,
2889                        SCRUB_MAX_PAGES_PER_BLOCK);
2890                 return -EINVAL;
2891         }
2892
2893
2894         mutex_lock(&fs_info->fs_devices->device_list_mutex);
2895         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
2896         if (!dev || (dev->missing && !is_dev_replace)) {
2897                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2898                 return -ENODEV;
2899         }
2900
2901         mutex_lock(&fs_info->scrub_lock);
2902         if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
2903                 mutex_unlock(&fs_info->scrub_lock);
2904                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2905                 return -EIO;
2906         }
2907
2908         btrfs_dev_replace_lock(&fs_info->dev_replace);
2909         if (dev->scrub_device ||
2910             (!is_dev_replace &&
2911              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2912                 btrfs_dev_replace_unlock(&fs_info->dev_replace);
2913                 mutex_unlock(&fs_info->scrub_lock);
2914                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2915                 return -EINPROGRESS;
2916         }
2917         btrfs_dev_replace_unlock(&fs_info->dev_replace);
2918
2919         ret = scrub_workers_get(fs_info, is_dev_replace);
2920         if (ret) {
2921                 mutex_unlock(&fs_info->scrub_lock);
2922                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2923                 return ret;
2924         }
2925
2926         sctx = scrub_setup_ctx(dev, is_dev_replace);
2927         if (IS_ERR(sctx)) {
2928                 mutex_unlock(&fs_info->scrub_lock);
2929                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2930                 scrub_workers_put(fs_info);
2931                 return PTR_ERR(sctx);
2932         }
2933         sctx->readonly = readonly;
2934         dev->scrub_device = sctx;
2935         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2936
2937         /*
2938          * by checking @scrub_pause_req here, we avoid a race between
2939          * committing a transaction and scrubbing.
2940          */
2941         __scrub_blocked_if_needed(fs_info);
2942         atomic_inc(&fs_info->scrubs_running);
2943         mutex_unlock(&fs_info->scrub_lock);
2944
2945         if (!is_dev_replace) {
2946                 /*
2947                  * by holding the device list mutex, scrubbing the super
2948                  * blocks cannot race with super writes from log tree sync.
2949                  */
2950                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2951                 ret = scrub_supers(sctx, dev);
2952                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2953         }
2954
2955         if (!ret)
2956                 ret = scrub_enumerate_chunks(sctx, dev, start, end,
2957                                              is_dev_replace);
2958
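        /*
         * wait for all in-flight bios before dropping our running count,
         * then wait for any worker items that are still pending
         */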
2959         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
2960         atomic_dec(&fs_info->scrubs_running);
2961         wake_up(&fs_info->scrub_pause_wait);
2962
2963         wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
2964
2965         if (progress)
2966                 memcpy(progress, &sctx->stat, sizeof(*progress));
2967
2968         mutex_lock(&fs_info->scrub_lock);
2969         dev->scrub_device = NULL;
2970         scrub_workers_put(fs_info);
2971         mutex_unlock(&fs_info->scrub_lock);
2972
2973         scrub_free_ctx(sctx);
2974
2975         return ret;
2976 }
2977
2978 void btrfs_scrub_pause(struct btrfs_root *root)
2979 {
2980         struct btrfs_fs_info *fs_info = root->fs_info;
2981
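        /*
         * raise the pause request and wait until every running scrub has
         * reported itself as paused
         */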
2982         mutex_lock(&fs_info->scrub_lock);
2983         atomic_inc(&fs_info->scrub_pause_req);
2984         while (atomic_read(&fs_info->scrubs_paused) !=
2985                atomic_read(&fs_info->scrubs_running)) {
2986                 mutex_unlock(&fs_info->scrub_lock);
2987                 wait_event(fs_info->scrub_pause_wait,
2988                            atomic_read(&fs_info->scrubs_paused) ==
2989                            atomic_read(&fs_info->scrubs_running));
2990                 mutex_lock(&fs_info->scrub_lock);
2991         }
2992         mutex_unlock(&fs_info->scrub_lock);
2993 }
2994
2995 void btrfs_scrub_continue(struct btrfs_root *root)
2996 {
2997         struct btrfs_fs_info *fs_info = root->fs_info;
2998
2999         atomic_dec(&fs_info->scrub_pause_req);
3000         wake_up(&fs_info->scrub_pause_wait);
3001 }
3002
3003 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3004 {
3005         mutex_lock(&fs_info->scrub_lock);
3006         if (!atomic_read(&fs_info->scrubs_running)) {
3007                 mutex_unlock(&fs_info->scrub_lock);
3008                 return -ENOTCONN;
3009         }
3010
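        /*
         * raise the cancel request and wait until all running scrubs have
         * stopped
         */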
3011         atomic_inc(&fs_info->scrub_cancel_req);
3012         while (atomic_read(&fs_info->scrubs_running)) {
3013                 mutex_unlock(&fs_info->scrub_lock);
3014                 wait_event(fs_info->scrub_pause_wait,
3015                            atomic_read(&fs_info->scrubs_running) == 0);
3016                 mutex_lock(&fs_info->scrub_lock);
3017         }
3018         atomic_dec(&fs_info->scrub_cancel_req);
3019         mutex_unlock(&fs_info->scrub_lock);
3020
3021         return 0;
3022 }
3023
3024 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3025                            struct btrfs_device *dev)
3026 {
3027         struct scrub_ctx *sctx;
3028
3029         mutex_lock(&fs_info->scrub_lock);
3030         sctx = dev->scrub_device;
3031         if (!sctx) {
3032                 mutex_unlock(&fs_info->scrub_lock);
3033                 return -ENOTCONN;
3034         }
3035         atomic_inc(&sctx->cancel_req);
3036         while (dev->scrub_device) {
3037                 mutex_unlock(&fs_info->scrub_lock);
3038                 wait_event(fs_info->scrub_pause_wait,
3039                            dev->scrub_device == NULL);
3040                 mutex_lock(&fs_info->scrub_lock);
3041         }
3042         mutex_unlock(&fs_info->scrub_lock);
3043
3044         return 0;
3045 }
3046
3047 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
3048                          struct btrfs_scrub_progress *progress)
3049 {
3050         struct btrfs_device *dev;
3051         struct scrub_ctx *sctx = NULL;
3052
3053         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3054         dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
3055         if (dev)
3056                 sctx = dev->scrub_device;
3057         if (sctx)
3058                 memcpy(progress, &sctx->stat, sizeof(*progress));
3059         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3060
3061         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3062 }
3063
3064 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
3065                                u64 extent_logical, u64 extent_len,
3066                                u64 *extent_physical,
3067                                struct btrfs_device **extent_dev,
3068                                int *extent_mirror_num)
3069 {
3070         u64 mapped_length;
3071         struct btrfs_bio *bbio = NULL;
3072         int ret;
3073
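        /*
         * map the logical extent and report the physical location, device
         * and mirror number of its first stripe; on failure the output
         * parameters are left untouched
         */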
3074         mapped_length = extent_len;
3075         ret = btrfs_map_block(fs_info, READ, extent_logical,
3076                               &mapped_length, &bbio, 0);
3077         if (ret || !bbio || mapped_length < extent_len ||
3078             !bbio->stripes[0].dev->bdev) {
3079                 kfree(bbio);
3080                 return;
3081         }
3082
3083         *extent_physical = bbio->stripes[0].physical;
3084         *extent_mirror_num = bbio->mirror_num;
3085         *extent_dev = bbio->stripes[0].dev;
3086         kfree(bbio);
3087 }
3088
3089 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
3090                               struct scrub_wr_ctx *wr_ctx,
3091                               struct btrfs_fs_info *fs_info,
3092                               struct btrfs_device *dev,
3093                               int is_dev_replace)
3094 {
3095         WARN_ON(wr_ctx->wr_curr_bio != NULL);
3096
3097         mutex_init(&wr_ctx->wr_lock);
3098         wr_ctx->wr_curr_bio = NULL;
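        /*
         * the rest of the write context is only needed for dev replace,
         * where scrub writes to the target device
         */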
3099         if (!is_dev_replace)
3100                 return 0;
3101
3102         WARN_ON(!dev->bdev);
3103         wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
3104                                          bio_get_nr_vecs(dev->bdev));
3105         wr_ctx->tgtdev = dev;
3106         atomic_set(&wr_ctx->flush_all_writes, 0);
3107         return 0;
3108 }
3109
3110 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
3111 {
3112         mutex_lock(&wr_ctx->wr_lock);
3113         kfree(wr_ctx->wr_curr_bio);
3114         wr_ctx->wr_curr_bio = NULL;
3115         mutex_unlock(&wr_ctx->wr_lock);
3116 }
3117
3118 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
3119                             int mirror_num, u64 physical_for_dev_replace)
3120 {
3121         struct scrub_copy_nocow_ctx *nocow_ctx;
3122         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3123
3124         nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
3125         if (!nocow_ctx) {
3126                 spin_lock(&sctx->stat_lock);
3127                 sctx->stat.malloc_errors++;
3128                 spin_unlock(&sctx->stat_lock);
3129                 return -ENOMEM;
3130         }
3131
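        /*
         * account for the queued work item so that scrub waits for it
         * before finishing
         */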
3132         scrub_pending_trans_workers_inc(sctx);
3133
3134         nocow_ctx->sctx = sctx;
3135         nocow_ctx->logical = logical;
3136         nocow_ctx->len = len;
3137         nocow_ctx->mirror_num = mirror_num;
3138         nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
3139         btrfs_init_work(&nocow_ctx->work, copy_nocow_pages_worker, NULL, NULL);
3140         INIT_LIST_HEAD(&nocow_ctx->inodes);
3141         btrfs_queue_work(fs_info->scrub_nocow_workers,
3142                          &nocow_ctx->work);
3143
3144         return 0;
3145 }
3146
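/*
 * Callback for iterate_inodes_from_logical(): remember each inode that
 * references the extent so that the worker can copy its pages later.
 */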
3147 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
3148 {
3149         struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
3150         struct scrub_nocow_inode *nocow_inode;
3151
3152         nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
3153         if (!nocow_inode)
3154                 return -ENOMEM;
3155         nocow_inode->inum = inum;
3156         nocow_inode->offset = offset;
3157         nocow_inode->root = root;
3158         list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
3159         return 0;
3160 }
3161
3162 #define COPY_COMPLETE 1
3163
3164 static void copy_nocow_pages_worker(struct btrfs_work_struct *work)
3165 {
3166         struct scrub_copy_nocow_ctx *nocow_ctx =
3167                 container_of(work, struct scrub_copy_nocow_ctx, work);
3168         struct scrub_ctx *sctx = nocow_ctx->sctx;
3169         u64 logical = nocow_ctx->logical;
3170         u64 len = nocow_ctx->len;
3171         int mirror_num = nocow_ctx->mirror_num;
3172         u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3173         int ret;
3174         struct btrfs_trans_handle *trans = NULL;
3175         struct btrfs_fs_info *fs_info;
3176         struct btrfs_path *path;
3177         struct btrfs_root *root;
3178         int not_written = 0;
3179
3180         fs_info = sctx->dev_root->fs_info;
3181         root = fs_info->extent_root;
3182
3183         path = btrfs_alloc_path();
3184         if (!path) {
3185                 spin_lock(&sctx->stat_lock);
3186                 sctx->stat.malloc_errors++;
3187                 spin_unlock(&sctx->stat_lock);
3188                 not_written = 1;
3189                 goto out;
3190         }
3191
3192         trans = btrfs_join_transaction(root);
3193         if (IS_ERR(trans)) {
3194                 not_written = 1;
3195                 goto out;
3196         }
3197
3198         ret = iterate_inodes_from_logical(logical, fs_info, path,
3199                                           record_inode_for_nocow, nocow_ctx);
3200         if (ret != 0 && ret != -ENOENT) {
3201                 btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
3202                         "phys %llu, len %llu, mir %u, ret %d",
3203                         logical, physical_for_dev_replace, len, mirror_num,
3204                         ret);
3205                 not_written = 1;
3206                 goto out;
3207         }
3208
3209         btrfs_end_transaction(trans, root);
3210         trans = NULL;
3211         while (!list_empty(&nocow_ctx->inodes)) {
3212                 struct scrub_nocow_inode *entry;
3213                 entry = list_first_entry(&nocow_ctx->inodes,
3214                                          struct scrub_nocow_inode,
3215                                          list);
3216                 list_del_init(&entry->list);
3217                 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
3218                                                  entry->root, nocow_ctx);
3219                 kfree(entry);
3220                 if (ret == COPY_COMPLETE) {
3221                         ret = 0;
3222                         break;
3223                 } else if (ret) {
3224                         break;
3225                 }
3226         }
3227 out:
3228         while (!list_empty(&nocow_ctx->inodes)) {
3229                 struct scrub_nocow_inode *entry;
3230                 entry = list_first_entry(&nocow_ctx->inodes,
3231                                          struct scrub_nocow_inode,
3232                                          list);
3233                 list_del_init(&entry->list);
3234                 kfree(entry);
3235         }
3236         if (trans && !IS_ERR(trans))
3237                 btrfs_end_transaction(trans, root);
3238         if (not_written)
3239                 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
3240                                             num_uncorrectable_read_errors);
3241
3242         btrfs_free_path(path);
3243         kfree(nocow_ctx);
3244
3245         scrub_pending_trans_workers_dec(sctx);
3246 }
3247
3248 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
3249                                       struct scrub_copy_nocow_ctx *nocow_ctx)
3250 {
3251         struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
3252         struct btrfs_key key;
3253         struct inode *inode;
3254         struct page *page;
3255         struct btrfs_root *local_root;
3256         struct btrfs_ordered_extent *ordered;
3257         struct extent_map *em;
3258         struct extent_state *cached_state = NULL;
3259         struct extent_io_tree *io_tree;
3260         u64 physical_for_dev_replace;
3261         u64 len = nocow_ctx->len;
3262         u64 lockstart = offset, lockend = offset + len - 1;
3263         unsigned long index;
3264         int srcu_index;
3265         int ret = 0;
3266         int err = 0;
3267
3268         key.objectid = root;
3269         key.type = BTRFS_ROOT_ITEM_KEY;
3270         key.offset = (u64)-1;
3271
3272         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
3273
3274         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
3275         if (IS_ERR(local_root)) {
3276                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
3277                 return PTR_ERR(local_root);
3278         }
3279
3280         key.type = BTRFS_INODE_ITEM_KEY;
3281         key.objectid = inum;
3282         key.offset = 0;
3283         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
3284         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
3285         if (IS_ERR(inode))
3286                 return PTR_ERR(inode);
3287
3288         /* Avoid truncate/dio/punch hole.. */
3289         mutex_lock(&inode->i_mutex);
3290         inode_dio_wait(inode);
3291
3292         physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3293         io_tree = &BTRFS_I(inode)->io_tree;
3294
3295         lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
3296         ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
3297         if (ordered) {
3298                 btrfs_put_ordered_extent(ordered);
3299                 goto out_unlock;
3300         }
3301
3302         em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
3303         if (IS_ERR(em)) {
3304                 ret = PTR_ERR(em);
3305                 goto out_unlock;
3306         }
3307
3308         /*
3309          * This inode's extent no longer covers the whole logical range
3310          * we are copying, move on to the next inode.
3311          */
3312         if (em->block_start > nocow_ctx->logical ||
3313             em->block_start + em->block_len < nocow_ctx->logical + len) {
3314                 free_extent_map(em);
3315                 goto out_unlock;
3316         }
3317         free_extent_map(em);
3318
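        /*
         * copy the range page by page; pages that are not up to date in
         * the page cache are read from the source mirror first
         */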
3319         while (len >= PAGE_CACHE_SIZE) {
3320                 index = offset >> PAGE_CACHE_SHIFT;
3321 again:
3322                 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
3323                 if (!page) {
3324                         btrfs_err(fs_info, "find_or_create_page() failed");
3325                         ret = -ENOMEM;
3326                         goto out;
3327                 }
3328
3329                 if (PageUptodate(page)) {
3330                         if (PageDirty(page))
3331                                 goto next_page;
3332                 } else {
3333                         ClearPageError(page);
3334                         err = extent_read_full_page_nolock(io_tree, page,
3335                                                            btrfs_get_extent,
3336                                                            nocow_ctx->mirror_num);
3337                         if (err) {
3338                                 ret = err;
3339                                 goto next_page;
3340                         }
3341
3342                         lock_page(page);
3343                         /*
3344                          * If the page has been removed from the page cache,
3345                          * its data is meaningless: it may be stale, and the
3346                          * new data may have been written to a new page in
3347                          * the page cache.
3348                          */
3349                         if (page->mapping != inode->i_mapping) {
3350                                 unlock_page(page);
3351                                 page_cache_release(page);
3352                                 goto again;
3353                         }
3354                         if (!PageUptodate(page)) {
3355                                 ret = -EIO;
3356                                 goto next_page;
3357                         }
3358                 }
3359                 err = write_page_nocow(nocow_ctx->sctx,
3360                                        physical_for_dev_replace, page);
3361                 if (err)
3362                         ret = err;
3363 next_page:
3364                 unlock_page(page);
3365                 page_cache_release(page);
3366
3367                 if (ret)
3368                         break;
3369
3370                 offset += PAGE_CACHE_SIZE;
3371                 physical_for_dev_replace += PAGE_CACHE_SIZE;
3372                 len -= PAGE_CACHE_SIZE;
3373         }
3374         ret = COPY_COMPLETE;
3375 out_unlock:
3376         unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
3377                              GFP_NOFS);
3378 out:
3379         mutex_unlock(&inode->i_mutex);
3380         iput(inode);
3381         return ret;
3382 }
3383
3384 static int write_page_nocow(struct scrub_ctx *sctx,
3385                             u64 physical_for_dev_replace, struct page *page)
3386 {
3387         struct bio *bio;
3388         struct btrfs_device *dev;
3389         int ret;
3390
3391         dev = sctx->wr_ctx.tgtdev;
3392         if (!dev)
3393                 return -EIO;
3394         if (!dev->bdev) {
3395                 printk_ratelimited(KERN_WARNING
3396                         "BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
3397                 return -EIO;
3398         }
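        /*
         * build a single page bio and write it synchronously to the
         * target device at @physical_for_dev_replace
         */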
3399         bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
3400         if (!bio) {
3401                 spin_lock(&sctx->stat_lock);
3402                 sctx->stat.malloc_errors++;
3403                 spin_unlock(&sctx->stat_lock);
3404                 return -ENOMEM;
3405         }
3406         bio->bi_size = 0;
3407         bio->bi_sector = physical_for_dev_replace >> 9;
3408         bio->bi_bdev = dev->bdev;
3409         ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
3410         if (ret != PAGE_CACHE_SIZE) {
3411 leave_with_eio:
3412                 bio_put(bio);
3413                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
3414                 return -EIO;
3415         }
3416
3417         if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
3418                 goto leave_with_eio;
3419
3420         bio_put(bio);
3421         return 0;
3422 }