/*
   drbd_actlog.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/drbd.h>
#include <linux/drbd_limits.h>
#include <linux/dynamic_debug.h>
#include "drbd_int.h"
enum al_transaction_types {
	AL_TR_UPDATE = 0,
	AL_TR_INITIALIZED = 0xffff
};
/* all fields on disk in big endian */
struct __packed al_transaction_on_disk {
	/* don't we all like magic */
	__be32	magic;

	/* to identify the most recent transaction block
	 * in the on disk ring buffer */
	__be32	tr_number;

	/* checksum on the full 4k block, with this field set to 0. */
	__be32	crc32c;

	/* type of transaction, special transaction types like:
	 * purge-all, set-all-idle, set-all-active, ... to-be-defined
	 * see also enum al_transaction_types */
	__be16	transaction_type;

	/* we currently allow only a few thousand extents,
	 * so 16bit will be enough for the slot number. */

	/* how many updates in this transaction */
	__be16	n_updates;

	/* maximum slot number, "al-extents" in drbd.conf speak.
	 * Having this in each transaction should make reconfiguration
	 * of that parameter easier. */
	__be16	context_size;

	/* slot number the context starts with */
	__be16	context_start_slot_nr;

	/* Some reserved bytes.  Expected usage is a 64bit counter of
	 * sectors-written since device creation, and other data generation tag
	 * supporting usage */
	__be32	__reserved[4];

	/* --- 36 bytes used --- */

	/* Reserve space for up to AL_UPDATES_PER_TRANSACTION changes
	 * in one transaction, then use the remaining bytes in the 4k block for
	 * context information.  A "flexible" number of updates per transaction
	 * would not help, as we have to account for the case when all update
	 * slots are used anyways, so it would only complicate code without
	 * additional benefit. */
	__be16	update_slot_nr[AL_UPDATES_PER_TRANSACTION];

	/* but the extent number is 32bit, which at an extent size of 4 MiB
	 * allows covering device sizes of up to 2**54 Byte (16 PiB) */
	__be32	update_extent_nr[AL_UPDATES_PER_TRANSACTION];

	/* --- 420 bytes used (36 + 64*6) --- */

	/* 4096 - 420 = 3676 = 919 * 4 */
	__be32	context[AL_CONTEXT_PER_TRANSACTION];
};
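/*
 * Size check for the layout above (a sketch of the arithmetic, assuming
 * AL_UPDATES_PER_TRANSACTION == 64 and AL_CONTEXT_PER_TRANSACTION == 919,
 * as the layout comments imply):
 *   header:  3 x __be32 (magic, tr_number, crc32c)     = 12 bytes
 *            4 x __be16 (type, n_updates, ctx fields)  =  8 bytes
 *            4 x __be32 reserved                       = 16 bytes -> 36 bytes
 *   updates: 64 x (2 + 4)                              = 384 bytes -> 420 bytes
 *   context: (4096 - 420) / 4                          = 919 slots
 * which fills exactly one aligned 4k meta data block.
 */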
void *drbd_md_get_buffer(struct drbd_device *device, const char *intent)
{
	int r;

	wait_event(device->misc_wait,
		   (r = atomic_cmpxchg(&device->md_io.in_use, 0, 1)) == 0 ||
		   device->state.disk <= D_FAILED);

	if (r)
		return NULL;

	device->md_io.current_use = intent;
	device->md_io.start_jif = jiffies;
	device->md_io.submit_jif = device->md_io.start_jif - 1;
	return page_address(device->md_io.page);
}

void drbd_md_put_buffer(struct drbd_device *device)
{
	if (atomic_dec_and_test(&device->md_io.in_use))
		wake_up(&device->misc_wait);
}
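/*
 * Minimal usage sketch for the buffer helpers above (a hypothetical,
 * illustrative helper, not part of DRBD; al_write_transaction() below
 * shows the real pattern):
 */
static int __maybe_unused drbd_md_buffer_usage_sketch(struct drbd_device *device)
{
	void *buffer = drbd_md_get_buffer(device, __func__);

	if (!buffer)
		return -ENODEV;	/* disk failed while we waited */
	/* ... use the one page md_io buffer for synchronous meta data IO ... */
	drbd_md_put_buffer(device);
	return 0;
}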
void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev,
				       unsigned int *done)
{
	long dt;

	rcu_read_lock();
	dt = rcu_dereference(bdev->disk_conf)->disk_timeout;
	rcu_read_unlock();
	dt = dt * HZ / 10;
	if (dt == 0)
		dt = MAX_SCHEDULE_TIMEOUT;

	dt = wait_event_timeout(device->misc_wait,
			*done || test_bit(FORCE_DETACH, &device->flags), dt);
	if (dt == 0) {
		drbd_err(device, "meta-data IO operation timed out\n");
		drbd_chk_io_error(device, 1, DRBD_FORCE_DETACH);
	}
}
static int _drbd_md_sync_page_io(struct drbd_device *device,
				 struct drbd_backing_dev *bdev,
				 sector_t sector, int op)
{
	struct bio *bio;
	/* we do all our meta data IO in aligned 4k blocks. */
	const int size = 4096;
	int err, op_flags = 0;

	device->md_io.done = 0;
	device->md_io.error = -ENODEV;

	if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
		op_flags |= REQ_FUA | REQ_PREFLUSH;
	op_flags |= REQ_SYNC | REQ_NOIDLE;

	bio = bio_alloc_drbd(GFP_NOIO);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_iter.bi_sector = sector;
	err = -EIO;
	if (bio_add_page(bio, device->md_io.page, size, 0) != size)
		goto out;
	bio->bi_private = device;
	bio->bi_end_io = drbd_md_endio;
	bio_set_op_attrs(bio, op, op_flags);

	if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL)
		/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
		;
	else if (!get_ldev_if_state(device, D_ATTACHING)) {
		/* Corresponding put_ldev in drbd_md_endio() */
		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
		err = -ENODEV;
		goto out;
	}

	bio_get(bio); /* one bio_put() is in the completion handler */
	atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
	device->md_io.submit_jif = jiffies;
	if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_io_error(bio);
	else
		submit_bio(bio);
	wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
	if (!bio->bi_error)
		err = device->md_io.error;

 out:
	bio_put(bio);
	return err;
}
int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
			 sector_t sector, int op)
{
	int err;

	D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);

	BUG_ON(!bdev->md_bdev);

	dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
	     current->comm, current->pid, __func__,
	     (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ",
	     (void *)_RET_IP_);

	if (sector < drbd_md_first_sector(bdev) ||
	    sector + 7 > drbd_md_last_sector(bdev))
		drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
		     current->comm, current->pid, __func__,
		     (unsigned long long)sector,
		     (op == REQ_OP_WRITE) ? "WRITE" : "READ");

	err = _drbd_md_sync_page_io(device, bdev, sector, op);
	if (err) {
		drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
		    (unsigned long long)sector,
		    (op == REQ_OP_WRITE) ? "WRITE" : "READ", err);
	}
	return err;
}
static struct bm_extent *find_active_resync_extent(struct drbd_device *device, unsigned int enr)
{
	struct lc_element *tmp;

	tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
	if (unlikely(tmp != NULL)) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_NO_WRITES, &bm_ext->flags))
			return bm_ext;
	}
	return NULL;
}
static struct lc_element *_al_get(struct drbd_device *device, unsigned int enr, bool nonblock)
{
	struct lc_element *al_ext;
	struct bm_extent *bm_ext;
	int wake;

	spin_lock_irq(&device->al_lock);
	bm_ext = find_active_resync_extent(device, enr);
	if (bm_ext) {
		wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
		spin_unlock_irq(&device->al_lock);
		if (wake)
			wake_up(&device->al_wait);
		return NULL;
	}
	if (nonblock)
		al_ext = lc_try_get(device->act_log, enr);
	else
		al_ext = lc_get(device->act_log, enr);
	spin_unlock_irq(&device->al_lock);
	return al_ext;
}
bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);

	D_ASSERT(device, first <= last);
	D_ASSERT(device, atomic_read(&device->local_cnt) > 0);

	/* FIXME figure out a fast path for bios crossing AL extent boundaries */
	if (first != last)
		return false;

	return _al_get(device, first, true);
}
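/*
 * Worked example for the first/last computation above (a sketch, assuming
 * AL_EXTENT_SHIFT == 22, i.e. 4 MiB activity log extents, and 512 byte
 * sectors, hence the ">> (AL_EXTENT_SHIFT-9)" == ">> 13"):
 *   a 4 KiB write at sector 8184 spans sectors 8184..8191,
 *   8184 >> 13 == 0 and 8191 >> 13 == 0  -> one extent, fast path possible;
 *   the same write at sector 8188 spans sectors 8188..8195,
 *   8188 >> 13 == 0 but 8195 >> 13 == 1  -> crosses an extent boundary.
 */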
bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned enr;
	bool need_transaction = false;

	D_ASSERT(device, first <= last);
	D_ASSERT(device, atomic_read(&device->local_cnt) > 0);

	for (enr = first; enr <= last; enr++) {
		struct lc_element *al_ext;
		wait_event(device->al_wait,
				(al_ext = _al_get(device, enr, false)) != NULL);
		if (al_ext->lc_number != enr)
			need_transaction = true;
	}
	return need_transaction;
}
#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
 * are still coupled, or assume too much about their relation.
 * Code below will not work if this is violated.
 * Will be cleaned up with some followup patch.
 */
# error FIXME
#endif

static unsigned int al_extent_to_bm_page(unsigned int al_enr)
{
	return al_enr >>
		/* bit to page */
		((PAGE_SHIFT + 3) -
		/* al extent number to bit */
		 (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
}
static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device)
{
	const unsigned int stripes = device->ldev->md.al_stripes;
	const unsigned int stripe_size_4kB = device->ldev->md.al_stripe_size_4k;

	/* transaction number, modulo on-disk ring buffer wrap around */
	unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k);

	/* ... to aligned 4k on disk block */
	t = ((t % stripes) * stripe_size_4kB) + t/stripes;

	/* ... to 512 byte sector in activity log */
	t *= 8;

	/* ... plus offset to the on disk position */
	return device->ldev->md.md_offset + device->ldev->md.al_offset + t;
}
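/*
 * Striping example for the mapping above (a sketch, assuming 2 stripes of
 * 4 x 4k each, i.e. al_size_4k == 8):
 *   transaction slot t:  0 1 2 3 4 5 6 7
 *   maps to 4k block:    0 4 1 5 2 6 3 7
 * e.g. t == 5: (5 % 2) * 4 + 5/2 = 4 + 2 = 6; times 8 then gives the
 * 512 byte sector offset within the activity log area.
 */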
static int __al_write_transaction(struct drbd_device *device, struct al_transaction_on_disk *buffer)
{
	struct lc_element *e;
	sector_t sector;
	int i, mx;
	unsigned extent_nr;
	unsigned crc = 0;
	int err = 0;

	memset(buffer, 0, sizeof(*buffer));
	buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
	buffer->tr_number = cpu_to_be32(device->al_tr_number);

	i = 0;

	drbd_bm_reset_al_hints(device);

	/* Even though no one can start to change this list
	 * once we set the LC_LOCKED -- from drbd_al_begin_io(),
	 * lc_try_lock_for_transaction() --, someone may still
	 * be in the process of changing it. */
	spin_lock_irq(&device->al_lock);
	list_for_each_entry(e, &device->act_log->to_be_changed, list) {
		if (i == AL_UPDATES_PER_TRANSACTION) {
			i++;
			break;
		}
		buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
		buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
		if (e->lc_number != LC_FREE)
			drbd_bm_mark_for_writeout(device,
					al_extent_to_bm_page(e->lc_number));
		i++;
	}
	spin_unlock_irq(&device->al_lock);
	BUG_ON(i > AL_UPDATES_PER_TRANSACTION);

	buffer->n_updates = cpu_to_be16(i);
	for ( ; i < AL_UPDATES_PER_TRANSACTION; i++) {
		buffer->update_slot_nr[i] = cpu_to_be16(-1);
		buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
	}

	buffer->context_size = cpu_to_be16(device->act_log->nr_elements);
	buffer->context_start_slot_nr = cpu_to_be16(device->al_tr_cycle);

	mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
		   device->act_log->nr_elements - device->al_tr_cycle);
	for (i = 0; i < mx; i++) {
		unsigned idx = device->al_tr_cycle + i;
		extent_nr = lc_element_by_index(device->act_log, idx)->lc_number;
		buffer->context[i] = cpu_to_be32(extent_nr);
	}
	for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
		buffer->context[i] = cpu_to_be32(LC_FREE);

	device->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
	if (device->al_tr_cycle >= device->act_log->nr_elements)
		device->al_tr_cycle = 0;

	sector = al_tr_number_to_on_disk_sector(device);

	/* the crc32c field itself is still zero from the memset above */
	crc = crc32c(0, buffer, 4096);
	buffer->crc32c = cpu_to_be32(crc);

	if (drbd_bm_write_hinted(device))
		err = -EIO;
	else {
		bool write_al_updates;
		rcu_read_lock();
		write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
		rcu_read_unlock();
		if (write_al_updates) {
			if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
				err = -EIO;
				drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
			} else {
				device->al_tr_number++;
				device->al_writ_cnt++;
			}
		}
	}

	return err;
}
static int al_write_transaction(struct drbd_device *device)
{
	struct al_transaction_on_disk *buffer;
	int err;

	if (!get_ldev(device)) {
		drbd_err(device, "disk is %s, cannot start al transaction\n",
			drbd_disk_str(device->state.disk));
		return -EIO;
	}

	/* The bitmap write may have failed, causing a state change. */
	if (device->state.disk < D_INCONSISTENT) {
		drbd_err(device,
			"disk is %s, cannot write al transaction\n",
			drbd_disk_str(device->state.disk));
		put_ldev(device);
		return -EIO;
	}

	/* protects md_io_buffer, al_tr_cycle, ... */
	buffer = drbd_md_get_buffer(device, __func__);
	if (!buffer) {
		drbd_err(device, "disk failed while waiting for md_io buffer\n");
		put_ldev(device);
		return -ENODEV;
	}

	err = __al_write_transaction(device, buffer);

	drbd_md_put_buffer(device);
	put_ldev(device);

	return err;
}
void drbd_al_begin_io_commit(struct drbd_device *device)
{
	bool locked = false;

	/* Serialize multiple transactions.
	 * This uses test_and_set_bit, memory barrier is implicit.
	 */
	wait_event(device->al_wait,
			device->act_log->pending_changes == 0 ||
			(locked = lc_try_lock_for_transaction(device->act_log)));

	if (locked) {
		/* Double check: it may have been committed by someone else,
		 * while we have been waiting for the lock. */
		if (device->act_log->pending_changes) {
			bool write_al_updates;

			rcu_read_lock();
			write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
			rcu_read_unlock();

			if (write_al_updates)
				al_write_transaction(device);
			spin_lock_irq(&device->al_lock);
			/* FIXME
			if (err)
				we need an "lc_cancel" here;
			*/
			lc_committed(device->act_log);
			spin_unlock_irq(&device->al_lock);
		}
		lc_unlock(device->act_log);
		wake_up(&device->al_wait);
	}
}
void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i)
{
	if (drbd_al_begin_io_prepare(device, i))
		drbd_al_begin_io_commit(device);
}
int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i)
{
	struct lru_cache *al = device->act_log;
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned nr_al_extents;
	unsigned available_update_slots;
	unsigned enr;

	D_ASSERT(device, first <= last);

	nr_al_extents = 1 + last - first; /* worst case: all touched extents are cold. */
	available_update_slots = min(al->nr_elements - al->used,
				al->max_pending_changes - al->pending_changes);

	/* We want all necessary updates for a given request within the same transaction.
	 * We could first check how many updates are *actually* needed,
	 * and use that instead of the worst-case nr_al_extents */
	if (available_update_slots < nr_al_extents) {
		/* Too many activity log extents are currently "hot".
		 *
		 * If we have accumulated pending changes already,
		 * we made progress.
		 *
		 * If we cannot get even a single pending change through,
		 * stop the fast path until we made some progress,
		 * or requests to "cold" extents could be starved. */
		if (!al->pending_changes)
			__set_bit(__LC_STARVING, &device->act_log->flags);
		return -ENOBUFS;
	}

	/* Is resync active in this area? */
	for (enr = first; enr <= last; enr++) {
		struct lc_element *tmp;
		tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
		if (unlikely(tmp != NULL)) {
			struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
			if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
				if (!test_and_set_bit(BME_PRIORITY, &bm_ext->flags))
					return -EBUSY;
				return -EWOULDBLOCK;
			}
		}
	}

	/* Check out the refcounts.
	 * Given that we checked for available elements and update slots above,
	 * this has to be successful. */
	for (enr = first; enr <= last; enr++) {
		struct lc_element *al_ext;
		al_ext = lc_get_cumulative(device->act_log, enr);
		if (!al_ext)
			drbd_info(device, "LOGIC BUG for enr=%u\n", enr);
	}
	return 0;
}
void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned enr;
	struct lc_element *extent;
	unsigned long flags;

	D_ASSERT(device, first <= last);
	spin_lock_irqsave(&device->al_lock, flags);

	for (enr = first; enr <= last; enr++) {
		extent = lc_find(device->act_log, enr);
		if (!extent) {
			drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr);
			continue;
		}
		lc_put(device->act_log, extent);
	}
	spin_unlock_irqrestore(&device->al_lock, flags);
	wake_up(&device->al_wait);
}
static int _try_lc_del(struct drbd_device *device, struct lc_element *al_ext)
{
	int rv;

	spin_lock_irq(&device->al_lock);
	rv = (al_ext->refcnt == 0);
	if (rv)
		lc_del(device->act_log, al_ext);
	spin_unlock_irq(&device->al_lock);
	return rv;
}
/**
 * drbd_al_shrink() - Removes all active extents from the activity log
 * @device:	DRBD device.
 *
 * Removes all active extents from the activity log, waiting until
 * the reference count of each entry dropped to 0 first, of course.
 *
 * You need to lock device->act_log with lc_try_lock() / lc_unlock()
 */
void drbd_al_shrink(struct drbd_device *device)
{
	struct lc_element *al_ext;
	int i;

	D_ASSERT(device, test_bit(__LC_LOCKED, &device->act_log->flags));

	for (i = 0; i < device->act_log->nr_elements; i++) {
		al_ext = lc_element_by_index(device->act_log, i);
		if (al_ext->lc_number == LC_FREE)
			continue;
		wait_event(device->al_wait, _try_lc_del(device, al_ext));
	}

	wake_up(&device->al_wait);
}
int drbd_al_initialize(struct drbd_device *device, void *buffer)
{
	struct al_transaction_on_disk *al = buffer;
	struct drbd_md *md = &device->ldev->md;
	int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
	int i;

	__al_write_transaction(device, al);
	/* There may or may not have been a pending transaction. */
	spin_lock_irq(&device->al_lock);
	lc_committed(device->act_log);
	spin_unlock_irq(&device->al_lock);

	/* The rest of the transactions will have an empty "updates" list, and
	 * are written out only to provide the context, and to initialize the
	 * on-disk ring buffer. */
	for (i = 1; i < al_size_4k; i++) {
		int err = __al_write_transaction(device, al);
		if (err)
			return err;
	}
	return 0;
}
static const char *drbd_change_sync_fname[] = {
	[RECORD_RS_FAILED] = "drbd_rs_failed_io",
	[SET_IN_SYNC] = "drbd_set_in_sync",
	[SET_OUT_OF_SYNC] = "drbd_set_out_of_sync"
};
/* ATTENTION. The AL's extents are 4MB each, while the extents in the
 * resync LRU-cache are 16MB each.
 * The caller of this function has to hold a get_ldev() reference.
 *
 * Adjusts the caching members ->rs_left (success) or ->rs_failed (!success),
 * potentially pulling in (and recounting the corresponding bits)
 * this resync extent into the resync extent lru cache.
 *
 * Returns whether all bits have been cleared for this resync extent,
 * precisely: (rs_left <= rs_failed)
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
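/*
 * Illustration of that size ratio (a sketch, assuming 4 MiB AL extents and
 * 16 MiB resync extents, i.e. AL_EXT_PER_BM_SECT == 4): AL extents 0..3 all
 * map to resync extent 0, AL extents 4..7 to resync extent 1, and so on,
 * which is why this file converts with enr/AL_EXT_PER_BM_SECT in one
 * direction and enr*AL_EXT_PER_BM_SECT in the other.
 */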
static bool update_rs_extent(struct drbd_device *device,
		unsigned int enr, int count,
		enum update_sync_bits_mode mode)
{
	struct lc_element *e;

	D_ASSERT(device, atomic_read(&device->local_cnt));

	/* When setting out-of-sync bits,
	 * we don't need it cached (lc_find).
	 * But if it is present in the cache,
	 * we should update the cached bit count.
	 * Otherwise, that extent should be in the resync extent lru cache
	 * already -- or we want to pull it in if necessary -- (lc_get),
	 * then update and check rs_left and rs_failed. */
	if (mode == SET_OUT_OF_SYNC)
		e = lc_find(device->resync, enr);
	else
		e = lc_get(device->resync, enr);
	if (e) {
		struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
		if (ext->lce.lc_number == enr) {
			if (mode == SET_IN_SYNC)
				ext->rs_left -= count;
			else if (mode == SET_OUT_OF_SYNC)
				ext->rs_left += count;
			else
				ext->rs_failed += count;
			if (ext->rs_left < ext->rs_failed) {
				drbd_warn(device, "BAD! enr=%u rs_left=%d "
				    "rs_failed=%d count=%d cstate=%s\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->rs_failed, count,
				     drbd_conn_str(device->state.conn));

				/* We don't expect to be able to clear more bits
				 * than have been set when we originally counted
				 * the set bits to cache that value in ext->rs_left.
				 * Whatever the reason (disconnect during resync,
				 * delayed local completion of an application write),
				 * try to fix it up by recounting here. */
				ext->rs_left = drbd_bm_e_weight(device, enr);
			}
		} else {
			/* Normally this element should be in the cache,
			 * since drbd_rs_begin_io() pulled it already in.
			 *
			 * But maybe an application write finished, and we set
			 * something outside the resync lru_cache in sync.
			 */
			int rs_left = drbd_bm_e_weight(device, enr);
			if (ext->flags != 0) {
				drbd_warn(device, "changing resync lce: %d[%u;%02lx]"
				     " -> %d[%u;00]\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->flags, enr, rs_left);
				ext->flags = 0;
			}
			if (ext->rs_failed) {
				drbd_warn(device, "Kicking resync_lru element enr=%u "
				     "out with rs_failed=%d\n",
				     ext->lce.lc_number, ext->rs_failed);
			}
			ext->rs_left = rs_left;
			ext->rs_failed = (mode == RECORD_RS_FAILED) ? count : 0;
			/* we don't keep a persistent log of the resync lru,
			 * we can commit any change right away. */
			lc_committed(device->resync);
		}
		if (mode != SET_OUT_OF_SYNC)
			lc_put(device->resync, &ext->lce);
		/* no race, we are within the al_lock! */

		if (ext->rs_left <= ext->rs_failed) {
			ext->rs_failed = 0;
			return true;
		}
	} else if (mode != SET_OUT_OF_SYNC) {
		/* be quiet if lc_find() did not find it. */
		drbd_err(device, "lc_get() failed! locked=%d/%d flags=%lu\n",
		    device->resync_locked,
		    device->resync->nr_elements,
		    device->resync->flags);
	}
	return false;
}
void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go)
{
	unsigned long now = jiffies;
	unsigned long last = device->rs_mark_time[device->rs_last_mark];
	int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;

	if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
		if (device->rs_mark_left[device->rs_last_mark] != still_to_go &&
		    device->state.conn != C_PAUSED_SYNC_T &&
		    device->state.conn != C_PAUSED_SYNC_S) {
			device->rs_mark_time[next] = now;
			device->rs_mark_left[next] = still_to_go;
			device->rs_last_mark = next;
		}
	}
}
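/*
 * The marks above form a small ring buffer of (time, bits-left) samples.
 * A sync speed estimate can be derived from any two samples; a sketch,
 * assuming 4 KiB of storage per bitmap bit:
 *
 *   bits_cleared = rs_mark_left[oldest] - rs_mark_left[newest];
 *   dt_jiffies   = rs_mark_time[newest] - rs_mark_time[oldest];
 *   kB_per_sec   = bits_cleared * 4 * HZ / dt_jiffies;
 */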
/* It is called lazy update, so don't do write-out too often. */
static bool lazy_bitmap_update_due(struct drbd_device *device)
{
	return time_after(jiffies, device->rs_last_bcast + 2*HZ);
}

static void maybe_schedule_on_disk_bitmap_update(struct drbd_device *device, bool rs_done)
{
	if (rs_done) {
		struct drbd_connection *connection = first_peer_device(device)->connection;

		if (connection->agreed_pro_version <= 95 ||
		    is_sync_target_state(device->state.conn))
			set_bit(RS_DONE, &device->flags);
			/* and also set RS_PROGRESS below */

		/* Else: rather wait for explicit notification via receive_state,
		 * to avoid uuids-rotated-too-fast causing full resync
		 * in next handshake, in case the replication link breaks
		 * at the most unfortunate time... */
	} else if (!lazy_bitmap_update_due(device))
		return;

	drbd_device_post_work(device, RS_PROGRESS);
}
static int update_sync_bits(struct drbd_device *device,
		unsigned long sbnr, unsigned long ebnr,
		enum update_sync_bits_mode mode)
{
	/*
	 * We keep a count of set bits per resync-extent in the ->rs_left
	 * caching member, so we need to loop and work within the resync extent
	 * alignment. Typically this loop will execute exactly once.
	 */
	unsigned long flags;
	unsigned long count = 0;
	unsigned int cleared = 0;
	while (sbnr <= ebnr) {
		/* set temporary boundary bit number to last bit number within
		 * the resync extent of the current start bit number,
		 * but cap at provided end bit number */
		unsigned long tbnr = min(ebnr, sbnr | BM_BLOCKS_PER_BM_EXT_MASK);
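		/*
		 * Worked example (a sketch, assuming 4 KiB bitmap blocks and
		 * 16 MiB resync extents, i.e. 4096 bits per extent and
		 * BM_BLOCKS_PER_BM_EXT_MASK == 0xfff):
		 *   sbnr = 4090, ebnr = 4100
		 *   1st pass: tbnr = min(4100, 4090 | 0xfff) = 4095
		 *   2nd pass: sbnr = 4096, tbnr = 4100 -> loop ends.
		 */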
		unsigned long c;

		if (mode == RECORD_RS_FAILED)
			/* Only called from drbd_rs_failed_io(), bits
			 * supposedly still set.  Recount, maybe some
			 * of the bits have been successfully cleared
			 * by application IO meanwhile.
			 */
			c = drbd_bm_count_bits(device, sbnr, tbnr);
		else if (mode == SET_IN_SYNC)
			c = drbd_bm_clear_bits(device, sbnr, tbnr);
		else /* if (mode == SET_OUT_OF_SYNC) */
			c = drbd_bm_set_bits(device, sbnr, tbnr);

		if (c) {
			spin_lock_irqsave(&device->al_lock, flags);
			cleared += update_rs_extent(device, BM_BIT_TO_EXT(sbnr), c, mode);
			spin_unlock_irqrestore(&device->al_lock, flags);
			count += c;
		}
		sbnr = tbnr + 1;
	}
	if (count) {
		if (mode == SET_IN_SYNC) {
			unsigned long still_to_go = drbd_bm_total_weight(device);
			bool rs_is_done = (still_to_go <= device->rs_failed);
			drbd_advance_rs_marks(device, still_to_go);
			if (cleared || rs_is_done)
				maybe_schedule_on_disk_bitmap_update(device, rs_is_done);
		} else if (mode == RECORD_RS_FAILED)
			device->rs_failed += count;
		wake_up(&device->al_wait);
	}
	return count;
}
static bool plausible_request_size(int size)
{
	return size > 0
		&& size <= DRBD_MAX_BATCH_BIO_SIZE
		&& IS_ALIGNED(size, 512);
}

/* clear the bit corresponding to the piece of storage in question:
 * size bytes of data starting from sector.  Only clear bits of the affected
 * one or more _aligned_ BM_BLOCK_SIZE blocks.
 *
 * called by worker on C_SYNC_TARGET and receiver on SyncSource.
 */
int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
		enum update_sync_bits_mode mode)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count = 0;
	sector_t esector, nr_sectors;

	/* This would be an empty REQ_PREFLUSH, be silent. */
	if ((mode == SET_OUT_OF_SYNC) && size == 0)
		return 0;

	if (!plausible_request_size(size)) {
		drbd_err(device, "%s: sector=%llus size=%d nonsense!\n",
				drbd_change_sync_fname[mode],
				(unsigned long long)sector, size);
		return 0;
	}

	if (!get_ldev(device))
		return 0; /* no disk, no metadata, no bitmap to manipulate bits in */

	nr_sectors = drbd_get_capacity(device->this_bdev);
	esector = sector + (size >> 9) - 1;

	if (!expect(sector < nr_sectors))
		goto out;
	if (!expect(esector < nr_sectors))
		esector = nr_sectors - 1;

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	if (mode == SET_IN_SYNC) {
		/* Round up start sector, round down end sector.  We make sure
		 * we only clear full, aligned, BM_BLOCK_SIZE blocks. */
		if (unlikely(esector < BM_SECT_PER_BIT-1))
			goto out;
		if (unlikely(esector == (nr_sectors-1)))
			ebnr = lbnr;
		else
			ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
		sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
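		/*
		 * Rounding example (a sketch, assuming 4 KiB bitmap blocks,
		 * i.e. BM_SECT_PER_BIT == 8): sector = 3, size = 8 KiB gives
		 * esector = 18; sbnr = (3+7) >> 3 = 1, ebnr = (18-7) >> 3 = 1,
		 * so only the one fully covered 4 KiB block is cleared.
		 */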
	} else {
		/* We set it out of sync, or record resync failure.
		 * Should not round anything here. */
		sbnr = BM_SECT_TO_BIT(sector);
		ebnr = BM_SECT_TO_BIT(esector);
	}

	count = update_sync_bits(device, sbnr, ebnr, mode);
out:
	put_ldev(device);
	return count;
}
static struct bm_extent *_bme_get(struct drbd_device *device, unsigned int enr)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int wakeup = 0;
	unsigned long rs_flags;

	spin_lock_irq(&device->al_lock);
	if (device->resync_locked > device->resync->nr_elements/2) {
		spin_unlock_irq(&device->al_lock);
		return NULL;
	}
	e = lc_get(device->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(device, enr);
			bm_ext->rs_failed = 0;
			lc_committed(device->resync);
			wakeup = 1;
		}
		if (bm_ext->lce.refcnt == 1)
			device->resync_locked++;
		set_bit(BME_NO_WRITES, &bm_ext->flags);
	}
	rs_flags = device->resync->flags;
	spin_unlock_irq(&device->al_lock);
	if (wakeup)
		wake_up(&device->al_wait);

	if (!bm_ext) {
		if (rs_flags & LC_STARVING)
			drbd_warn(device, "Have to wait for element"
			     " (resync LRU too small?)\n");
		BUG_ON(rs_flags & LC_LOCKED);
	}

	return bm_ext;
}
static int _is_in_al(struct drbd_device *device, unsigned int enr)
{
	int rv;

	spin_lock_irq(&device->al_lock);
	rv = lc_is_used(device->act_log, enr);
	spin_unlock_irq(&device->al_lock);
	return rv;
}
/**
 * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
 * @device:	DRBD device.
 * @sector:	The sector number.
 *
 * This function sleeps on al_wait.  Returns 0 on success, -EINTR if interrupted.
 */
int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct bm_extent *bm_ext;
	int i, sig;
	bool sa;

retry:
	sig = wait_event_interruptible(device->al_wait,
			(bm_ext = _bme_get(device, enr)));
	if (sig)
		return -EINTR;

	if (test_bit(BME_LOCKED, &bm_ext->flags))
		return 0;

	/* step aside only while we are above c-min-rate; unless disabled. */
	sa = drbd_rs_c_min_rate_throttle(device);

	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		sig = wait_event_interruptible(device->al_wait,
					       !_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) ||
					       (sa && test_bit(BME_PRIORITY, &bm_ext->flags)));

		if (sig || (sa && test_bit(BME_PRIORITY, &bm_ext->flags))) {
			spin_lock_irq(&device->al_lock);
			if (lc_put(device->resync, &bm_ext->lce) == 0) {
				bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
				device->resync_locked--;
				wake_up(&device->al_wait);
			}
			spin_unlock_irq(&device->al_lock);
			if (sig)
				return -EINTR;
			if (schedule_timeout_interruptible(HZ/10))
				return -EINTR;
			goto retry;
		}
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
	return 0;
}
/**
 * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
 * @device:	DRBD device.
 * @sector:	The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
 * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;
	bool throttle = drbd_rs_should_slow_down(device, sector, true);

	/* If we need to throttle, a half-locked (only marked BME_NO_WRITES,
	 * not yet BME_LOCKED) extent needs to be kicked out explicitly if we
	 * need to throttle. There is at most one such half-locked extent,
	 * which is remembered in resync_wenr. */

	if (throttle && device->resync_wenr != enr)
		return -EAGAIN;

	spin_lock_irq(&device->al_lock);
	if (device->resync_wenr != LC_FREE && device->resync_wenr != enr) {
		/* in case you have very heavy scattered io, it may
		 * stall the syncer indefinitely if we give up the ref count
		 * when we try again and requeue.
		 *
		 * if we don't give up the refcount, but the next time
		 * we are scheduled this extent has been "synced" by new
		 * application writes, we'd miss the lc_put on the
		 * extent we keep the refcount on.
		 * so we remembered which extent we had to try again, and
		 * if the next requested one is something else, we do
		 * the lc_put here...
		 * we also have to wake_up
		 */
		e = lc_find(device->resync, device->resync_wenr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (bm_ext) {
			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
			clear_bit(BME_NO_WRITES, &bm_ext->flags);
			device->resync_wenr = LC_FREE;
			if (lc_put(device->resync, &bm_ext->lce) == 0) {
				bm_ext->flags = 0;
				device->resync_locked--;
			}
			wake_up(&device->al_wait);
		} else {
			drbd_alert(device, "LOGIC BUG\n");
		}
	}
	/* TRY. */
	e = lc_try_get(device->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (test_bit(BME_LOCKED, &bm_ext->flags))
			goto proceed;
		if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
			device->resync_locked++;
		} else {
			/* we did set the BME_NO_WRITES,
			 * but then could not set BME_LOCKED,
			 * so we tried again.
			 * drop the extra reference. */
			bm_ext->lce.refcnt--;
			D_ASSERT(device, bm_ext->lce.refcnt > 0);
		}
		goto check_al;
	} else {
		/* do we rather want to try later? */
		if (device->resync_locked > device->resync->nr_elements-3)
			goto try_again;
		/* Do or do not. There is no try. -- Yoda */
		e = lc_get(device->resync, enr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (!bm_ext) {
			const unsigned long rs_flags = device->resync->flags;
			if (rs_flags & LC_STARVING)
				drbd_warn(device, "Have to wait for element"
				     " (resync LRU too small?)\n");
			BUG_ON(rs_flags & LC_LOCKED);
			goto try_again;
		}
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(device, enr);
			bm_ext->rs_failed = 0;
			lc_committed(device->resync);
			wake_up(&device->al_wait);
			D_ASSERT(device, test_bit(BME_LOCKED, &bm_ext->flags) == 0);
		}
		set_bit(BME_NO_WRITES, &bm_ext->flags);
		D_ASSERT(device, bm_ext->lce.refcnt == 1);
		device->resync_locked++;
		goto check_al;
	}
check_al:
	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		if (lc_is_used(device->act_log, al_enr+i))
			goto try_again;
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
	device->resync_wenr = LC_FREE;
	spin_unlock_irq(&device->al_lock);
	return 0;

try_again:
	if (bm_ext) {
		if (throttle) {
			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
			clear_bit(BME_NO_WRITES, &bm_ext->flags);
			device->resync_wenr = LC_FREE;
			if (lc_put(device->resync, &bm_ext->lce) == 0) {
				bm_ext->flags = 0;
				device->resync_locked--;
			}
			wake_up(&device->al_wait);
		} else
			device->resync_wenr = enr;
	}
	spin_unlock_irq(&device->al_lock);
	return -EAGAIN;
}
void drbd_rs_complete_io(struct drbd_device *device, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct lc_element *e;
	struct bm_extent *bm_ext;
	unsigned long flags;

	spin_lock_irqsave(&device->al_lock, flags);
	e = lc_find(device->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (!bm_ext) {
		spin_unlock_irqrestore(&device->al_lock, flags);
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "drbd_rs_complete_io() called, but extent not found\n");
		return;
	}

	if (bm_ext->lce.refcnt == 0) {
		spin_unlock_irqrestore(&device->al_lock, flags);
		drbd_err(device, "drbd_rs_complete_io(,%llu [=%u]) called, "
		    "but refcnt is 0!?\n",
		    (unsigned long long)sector, enr);
		return;
	}

	if (lc_put(device->resync, &bm_ext->lce) == 0) {
		bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
		device->resync_locked--;
		wake_up(&device->al_wait);
	}

	spin_unlock_irqrestore(&device->al_lock, flags);
}
/**
 * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
 * @device:	DRBD device.
 */
void drbd_rs_cancel_all(struct drbd_device *device)
{
	spin_lock_irq(&device->al_lock);

	if (get_ldev_if_state(device, D_FAILED)) { /* Makes sure ->resync is there. */
		lc_reset(device->resync);
		put_ldev(device);
	}
	device->resync_locked = 0;
	device->resync_wenr = LC_FREE;
	spin_unlock_irq(&device->al_lock);
	wake_up(&device->al_wait);
}
/**
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @device:	DRBD device.
 *
 * Returns 0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
int drbd_rs_del_all(struct drbd_device *device)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&device->al_lock);

	if (get_ldev_if_state(device, D_FAILED)) {
		/* ok, ->resync is there. */
		for (i = 0; i < device->resync->nr_elements; i++) {
			e = lc_element_by_index(device->resync, i);
			bm_ext = lc_entry(e, struct bm_extent, lce);
			if (bm_ext->lce.lc_number == LC_FREE)
				continue;
			if (bm_ext->lce.lc_number == device->resync_wenr) {
				drbd_info(device, "dropping %u in drbd_rs_del_all, apparently"
				     " got 'synced' by application io\n",
				     device->resync_wenr);
				D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
				D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				device->resync_wenr = LC_FREE;
				lc_put(device->resync, &bm_ext->lce);
			}
			if (bm_ext->lce.refcnt != 0) {
				drbd_info(device, "Retrying drbd_rs_del_all() later. "
				     "refcnt=%d\n", bm_ext->lce.refcnt);
				put_ldev(device);
				spin_unlock_irq(&device->al_lock);
				return -EAGAIN;
			}
			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(device, !test_bit(BME_NO_WRITES, &bm_ext->flags));
			lc_del(device->resync, &bm_ext->lce);
		}
		D_ASSERT(device, device->resync->used == 0);
		put_ldev(device);
	}
	spin_unlock_irq(&device->al_lock);
	wake_up(&device->al_wait);

	return 0;
}