drbd: move bitmap write from resync_finished to after_state_change
[cascardo/linux.git] drivers/block/drbd/drbd_main.c
1 /*
2    drbd.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11    from Logicworks, Inc. for making SDP replication support possible.
12
13    drbd is free software; you can redistribute it and/or modify
14    it under the terms of the GNU General Public License as published by
15    the Free Software Foundation; either version 2, or (at your option)
16    any later version.
17
18    drbd is distributed in the hope that it will be useful,
19    but WITHOUT ANY WARRANTY; without even the implied warranty of
20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21    GNU General Public License for more details.
22
23    You should have received a copy of the GNU General Public License
24    along with drbd; see the file COPYING.  If not, write to
25    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27  */
28
29 #include <linux/module.h>
30 #include <linux/drbd.h>
31 #include <asm/uaccess.h>
32 #include <asm/types.h>
33 #include <net/sock.h>
34 #include <linux/ctype.h>
35 #include <linux/mutex.h>
36 #include <linux/fs.h>
37 #include <linux/file.h>
38 #include <linux/proc_fs.h>
39 #include <linux/init.h>
40 #include <linux/mm.h>
41 #include <linux/memcontrol.h>
42 #include <linux/mm_inline.h>
43 #include <linux/slab.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/notifier.h>
47 #include <linux/kthread.h>
48
49 #define __KERNEL_SYSCALLS__
50 #include <linux/unistd.h>
51 #include <linux/vmalloc.h>
52
53 #include <linux/drbd_limits.h>
54 #include "drbd_int.h"
55 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57 #include "drbd_vli.h"
58
59 struct after_state_chg_work {
60         struct drbd_work w;
61         union drbd_state os;
62         union drbd_state ns;
63         enum chg_state_flags flags;
64         struct completion *done;
65 };
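/* Note: __drbd_set_state() runs under req_lock and must not sleep, so it
 * packages the old and new state into this work item and queues it for the
 * worker; w_after_state_ch() then runs after_state_ch(), which may sleep.
 * The optional completion is used with CS_WAIT_COMPLETE so that the caller
 * can wait for those after-state actions to finish. */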
66
67 static DEFINE_MUTEX(drbd_main_mutex);
68 int drbdd_init(struct drbd_thread *);
69 int drbd_worker(struct drbd_thread *);
70 int drbd_asender(struct drbd_thread *);
71
72 int drbd_init(void);
73 static int drbd_open(struct block_device *bdev, fmode_t mode);
74 static int drbd_release(struct gendisk *gd, fmode_t mode);
75 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
76 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
77                            union drbd_state ns, enum chg_state_flags flags);
78 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79 static void md_sync_timer_fn(unsigned long data);
80 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82
83 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84               "Lars Ellenberg <lars@linbit.com>");
85 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
86 MODULE_VERSION(REL_VERSION);
87 MODULE_LICENSE("GPL");
88 MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)");
89 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
90
91 #include <linux/moduleparam.h>
92 /* allow_open_on_secondary */
93 MODULE_PARM_DESC(allow_oos, "DONT USE!");
94 /* thanks to these macros, if compiled into the kernel (not-module),
95  * this becomes the boot parameter drbd.minor_count */
96 module_param(minor_count, uint, 0444);
97 module_param(disable_sendpage, bool, 0644);
98 module_param(allow_oos, bool, 0);
99 module_param(cn_idx, uint, 0444);
100 module_param(proc_details, int, 0644);
101
102 #ifdef CONFIG_DRBD_FAULT_INJECTION
103 int enable_faults;
104 int fault_rate;
105 static int fault_count;
106 int fault_devs;
107 /* bitmap of enabled faults */
108 module_param(enable_faults, int, 0664);
109 /* fault rate % value - applies to all enabled faults */
110 module_param(fault_rate, int, 0664);
111 /* count of faults inserted */
112 module_param(fault_count, int, 0664);
113 /* bitmap of devices to insert faults on */
114 module_param(fault_devs, int, 0644);
115 #endif
116
117 /* module parameter, defined */
118 unsigned int minor_count = 32;
119 int disable_sendpage;
120 int allow_oos;
121 unsigned int cn_idx = CN_IDX_DRBD;
122 int proc_details;       /* Detail level in proc drbd*/
123
124 /* Module parameter for setting the user mode helper program
125  * to run. Default is /sbin/drbdadm */
126 char usermode_helper[80] = "/sbin/drbdadm";
127
128 module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
129
130 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
131  * as member "struct gendisk *vdisk;"
132  */
133 struct drbd_conf **minor_table;
134
135 struct kmem_cache *drbd_request_cache;
136 struct kmem_cache *drbd_ee_cache;       /* epoch entries */
137 struct kmem_cache *drbd_bm_ext_cache;   /* bitmap extents */
138 struct kmem_cache *drbd_al_ext_cache;   /* activity log extents */
139 mempool_t *drbd_request_mempool;
140 mempool_t *drbd_ee_mempool;
141
142 /* I do not use a standard mempool, because:
143    1) I want to hand out the pre-allocated objects first.
144    2) I want to be able to interrupt sleeping allocation with a signal.
145    Note: This is a singly linked list; the next pointer is the private
146          member of struct page.
147  */
148 struct page *drbd_pp_pool;
149 spinlock_t   drbd_pp_lock;
150 int          drbd_pp_vacant;
151 wait_queue_head_t drbd_pp_wait;
152
153 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
154
155 static const struct block_device_operations drbd_ops = {
156         .owner =   THIS_MODULE,
157         .open =    drbd_open,
158         .release = drbd_release,
159 };
160
161 #define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
162
163 #ifdef __CHECKER__
164 /* When checking with sparse, and this is an inline function, sparse will
165    give tons of false positives. When this is a real function, sparse works.
166  */
167 int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
168 {
169         int io_allowed;
170
171         atomic_inc(&mdev->local_cnt);
172         io_allowed = (mdev->state.disk >= mins);
173         if (!io_allowed) {
174                 if (atomic_dec_and_test(&mdev->local_cnt))
175                         wake_up(&mdev->misc_wait);
176         }
177         return io_allowed;
178 }
179
180 #endif
181
182 /**
183  * DOC: The transfer log
184  *
185  * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
186  * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
187  * of the list. There is always at least one &struct drbd_tl_epoch object.
188  *
189  * Each &struct drbd_tl_epoch has a circular doubly linked list of requests
190  * attached.
191  */
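/* Rough sketch of the transfer log layout (illustrative only):
 *
 *   mdev->oldest_tle -> epoch -> epoch -> ... -> epoch <- mdev->newest_tle
 *                         |        |               |
 *                      requests  requests       requests
 *
 * Epochs are chained through ->next; each epoch's requests hang off its
 * ->requests list head (a circular doubly linked list). */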
192 static int tl_init(struct drbd_conf *mdev)
193 {
194         struct drbd_tl_epoch *b;
195
196         /* during device minor initialization, we may well use GFP_KERNEL */
197         b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
198         if (!b)
199                 return 0;
200         INIT_LIST_HEAD(&b->requests);
201         INIT_LIST_HEAD(&b->w.list);
202         b->next = NULL;
203         b->br_number = 4711;
204         b->n_writes = 0;
205         b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
206
207         mdev->oldest_tle = b;
208         mdev->newest_tle = b;
209         INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
210
211         mdev->tl_hash = NULL;
212         mdev->tl_hash_s = 0;
213
214         return 1;
215 }
216
217 static void tl_cleanup(struct drbd_conf *mdev)
218 {
219         D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
220         D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
221         kfree(mdev->oldest_tle);
222         mdev->oldest_tle = NULL;
223         kfree(mdev->unused_spare_tle);
224         mdev->unused_spare_tle = NULL;
225         kfree(mdev->tl_hash);
226         mdev->tl_hash = NULL;
227         mdev->tl_hash_s = 0;
228 }
229
230 /**
231  * _tl_add_barrier() - Adds a barrier to the transfer log
232  * @mdev:       DRBD device.
233  * @new:        Barrier to be added before the current head of the TL.
234  *
235  * The caller must hold the req_lock.
236  */
237 void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
238 {
239         struct drbd_tl_epoch *newest_before;
240
241         INIT_LIST_HEAD(&new->requests);
242         INIT_LIST_HEAD(&new->w.list);
243         new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
244         new->next = NULL;
245         new->n_writes = 0;
246
247         newest_before = mdev->newest_tle;
248         /* never send a barrier number == 0, because that is special-cased
249          * when using TCQ for our write ordering code */
250         new->br_number = (newest_before->br_number+1) ?: 1;
251         if (mdev->newest_tle != new) {
252                 mdev->newest_tle->next = new;
253                 mdev->newest_tle = new;
254         }
255 }
256
257 /**
258  * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
259  * @mdev:       DRBD device.
260  * @barrier_nr: Expected identifier of the DRBD write barrier packet.
261  * @set_size:   Expected number of requests before that barrier.
262  *
263  * In case the passed barrier_nr or set_size does not match the oldest
264  * &struct drbd_tl_epoch objects this function will cause a termination
265  * of the connection.
266  */
267 void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
268                        unsigned int set_size)
269 {
270         struct drbd_tl_epoch *b, *nob; /* next old barrier */
271         struct list_head *le, *tle;
272         struct drbd_request *r;
273
274         spin_lock_irq(&mdev->req_lock);
275
276         b = mdev->oldest_tle;
277
278         /* first some paranoia code */
279         if (b == NULL) {
280                 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
281                         barrier_nr);
282                 goto bail;
283         }
284         if (b->br_number != barrier_nr) {
285                 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
286                         barrier_nr, b->br_number);
287                 goto bail;
288         }
289         if (b->n_writes != set_size) {
290                 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
291                         barrier_nr, set_size, b->n_writes);
292                 goto bail;
293         }
294
295         /* Clean up list of requests processed during current epoch */
296         list_for_each_safe(le, tle, &b->requests) {
297                 r = list_entry(le, struct drbd_request, tl_requests);
298                 _req_mod(r, barrier_acked);
299         }
300         /* There could be requests on the list waiting for completion
301            of the write to the local disk. To avoid corruption of
302            slab's data structures we have to remove the list's head.
303
304            Also there could have been a barrier ack out of sequence, overtaking
305            the write acks - which would be a bug and violate write ordering.
306            To not deadlock in case we lose connection while such requests are
307            still pending, we need some way to find them for the
308            _req_mod(connection_lost_while_pending).
309
310            These have been list_move'd to the out_of_sequence_requests list in
311            _req_mod(, barrier_acked) above.
312            */
313         list_del_init(&b->requests);
314
315         nob = b->next;
316         if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
317                 _tl_add_barrier(mdev, b);
318                 if (nob)
319                         mdev->oldest_tle = nob;
320                 /* if nob == NULL, b was the only barrier and becomes the new
321                    barrier. Therefore mdev->oldest_tle already points to b */
322         } else {
323                 D_ASSERT(nob != NULL);
324                 mdev->oldest_tle = nob;
325                 kfree(b);
326         }
327
328         spin_unlock_irq(&mdev->req_lock);
329         dec_ap_pending(mdev);
330
331         return;
332
333 bail:
334         spin_unlock_irq(&mdev->req_lock);
335         drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
336 }
337
338 /**
339  * _tl_restart() - Walks the transfer log, and applies an action to all requests
340  * @mdev:       DRBD device.
341  * @what:       The action/event to perform with all request objects
342  *
343  * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
344  * restart_frozen_disk_io.
345  */
346 static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
347 {
348         struct drbd_tl_epoch *b, *tmp, **pn;
349         struct list_head *le, *tle, carry_reads;
350         struct drbd_request *req;
351         int rv, n_writes, n_reads;
352
353         b = mdev->oldest_tle;
354         pn = &mdev->oldest_tle;
355         while (b) {
356                 n_writes = 0;
357                 n_reads = 0;
358                 INIT_LIST_HEAD(&carry_reads);
359                 list_for_each_safe(le, tle, &b->requests) {
360                         req = list_entry(le, struct drbd_request, tl_requests);
361                         rv = _req_mod(req, what);
362
363                         n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
364                         n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
365                 }
366                 tmp = b->next;
367
368                 if (n_writes) {
369                         if (what == resend) {
370                                 b->n_writes = n_writes;
371                                 if (b->w.cb == NULL) {
372                                         b->w.cb = w_send_barrier;
373                                         inc_ap_pending(mdev);
374                                         set_bit(CREATE_BARRIER, &mdev->flags);
375                                 }
376
377                                 drbd_queue_work(&mdev->data.work, &b->w);
378                         }
379                         pn = &b->next;
380                 } else {
381                         if (n_reads)
382                                 list_add(&carry_reads, &b->requests);
383                         /* there could still be requests on that ring list,
384                          * in case local io is still pending */
385                         list_del(&b->requests);
386
387                         /* dec_ap_pending corresponding to queue_barrier.
388                          * the newest barrier may not have been queued yet,
389                          * in which case w.cb is still NULL. */
390                         if (b->w.cb != NULL)
391                                 dec_ap_pending(mdev);
392
393                         if (b == mdev->newest_tle) {
394                                 /* recycle, but reinit! */
395                                 D_ASSERT(tmp == NULL);
396                                 INIT_LIST_HEAD(&b->requests);
397                                 list_splice(&carry_reads, &b->requests);
398                                 INIT_LIST_HEAD(&b->w.list);
399                                 b->w.cb = NULL;
400                                 b->br_number = net_random();
401                                 b->n_writes = 0;
402
403                                 *pn = b;
404                                 break;
405                         }
406                         *pn = tmp;
407                         kfree(b);
408                 }
409                 b = tmp;
410                 list_splice(&carry_reads, &b->requests);
411         }
412 }
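/* Note on _tl_restart(): an epoch whose writes are all gone is freed (or,
 * if it is the newest epoch, reinitialized and recycled), while reads that
 * are still pending on it are carried over into the following epoch via
 * the carry_reads list. */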
413
414
415 /**
416  * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
417  * @mdev:       DRBD device.
418  *
419  * This is called after the connection to the peer was lost. The storage covered
420  * by the requests on the transfer log gets marked as out of sync. Called from the
421  * receiver thread and the worker thread.
422  */
423 void tl_clear(struct drbd_conf *mdev)
424 {
425         struct list_head *le, *tle;
426         struct drbd_request *r;
427
428         spin_lock_irq(&mdev->req_lock);
429
430         _tl_restart(mdev, connection_lost_while_pending);
431
432         /* we expect this list to be empty. */
433         D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
434
435         /* but just in case, clean it up anyway! */
436         list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
437                 r = list_entry(le, struct drbd_request, tl_requests);
438                 /* It would be nice to complete outside of spinlock.
439                  * But this is easier for now. */
440                 _req_mod(r, connection_lost_while_pending);
441         }
442
443         /* ensure bit indicating barrier is required is clear */
444         clear_bit(CREATE_BARRIER, &mdev->flags);
445
446         memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
447
448         spin_unlock_irq(&mdev->req_lock);
449 }
450
451 void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
452 {
453         spin_lock_irq(&mdev->req_lock);
454         _tl_restart(mdev, what);
455         spin_unlock_irq(&mdev->req_lock);
456 }
457
458 /**
459  * cl_wide_st_chg() - true if the state change is a cluster wide one
460  * @mdev:       DRBD device.
461  * @os:         old (current) state.
462  * @ns:         new (wanted) state.
463  */
464 static int cl_wide_st_chg(struct drbd_conf *mdev,
465                           union drbd_state os, union drbd_state ns)
466 {
467         return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
468                  ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
469                   (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
470                   (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
471                   (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
472                 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
473                 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
474 }
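/* If a state change is cluster wide according to the test above,
 * drbd_req_state() does not commit it locally right away: it first sends a
 * state change request to the peer (drbd_send_state_req()) and applies the
 * new state only after the peer's answer has been evaluated in
 * _req_st_cond(). */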
475
476 enum drbd_state_rv
477 drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
478                   union drbd_state mask, union drbd_state val)
479 {
480         unsigned long flags;
481         union drbd_state os, ns;
482         enum drbd_state_rv rv;
483
484         spin_lock_irqsave(&mdev->req_lock, flags);
485         os = mdev->state;
486         ns.i = (os.i & ~mask.i) | val.i;
487         rv = _drbd_set_state(mdev, ns, f, NULL);
488         ns = mdev->state;
489         spin_unlock_irqrestore(&mdev->req_lock, flags);
490
491         return rv;
492 }
493
494 /**
495  * drbd_force_state() - Impose a change which happens outside our control on our state
496  * @mdev:       DRBD device.
497  * @mask:       mask of state bits to change.
498  * @val:        value of new state bits.
499  */
500 void drbd_force_state(struct drbd_conf *mdev,
501         union drbd_state mask, union drbd_state val)
502 {
503         drbd_change_state(mdev, CS_HARD, mask, val);
504 }
505
506 static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
507 static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
508                                                     union drbd_state,
509                                                     union drbd_state);
510 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
511                                        union drbd_state ns, const char **warn_sync_abort);
512 int drbd_send_state_req(struct drbd_conf *,
513                         union drbd_state, union drbd_state);
514
515 static enum drbd_state_rv
516 _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
517              union drbd_state val)
518 {
519         union drbd_state os, ns;
520         unsigned long flags;
521         enum drbd_state_rv rv;
522
523         if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
524                 return SS_CW_SUCCESS;
525
526         if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
527                 return SS_CW_FAILED_BY_PEER;
528
529         rv = 0;
530         spin_lock_irqsave(&mdev->req_lock, flags);
531         os = mdev->state;
532         ns.i = (os.i & ~mask.i) | val.i;
533         ns = sanitize_state(mdev, os, ns, NULL);
534
535         if (!cl_wide_st_chg(mdev, os, ns))
536                 rv = SS_CW_NO_NEED;
537         if (!rv) {
538                 rv = is_valid_state(mdev, ns);
539                 if (rv == SS_SUCCESS) {
540                         rv = is_valid_state_transition(mdev, ns, os);
541                         if (rv == SS_SUCCESS)
542                                 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
543                 }
544         }
545         spin_unlock_irqrestore(&mdev->req_lock, flags);
546
547         return rv;
548 }
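/* _req_st_cond() is the condition of the wait_event() in drbd_req_state():
 * returning SS_UNKNOWN_ERROR (0) keeps the caller waiting for the peer's
 * reply, any other return value ends the wait. */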
549
550 /**
551  * drbd_req_state() - Perform a possibly cluster-wide state change
552  * @mdev:       DRBD device.
553  * @mask:       mask of state bits to change.
554  * @val:        value of new state bits.
555  * @f:          flags
556  *
557  * Should not be called directly, use drbd_request_state() or
558  * _drbd_request_state().
559  */
560 static enum drbd_state_rv
561 drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
562                union drbd_state val, enum chg_state_flags f)
563 {
564         struct completion done;
565         unsigned long flags;
566         union drbd_state os, ns;
567         enum drbd_state_rv rv;
568
569         init_completion(&done);
570
571         if (f & CS_SERIALIZE)
572                 mutex_lock(&mdev->state_mutex);
573
574         spin_lock_irqsave(&mdev->req_lock, flags);
575         os = mdev->state;
576         ns.i = (os.i & ~mask.i) | val.i;
577         ns = sanitize_state(mdev, os, ns, NULL);
578
579         if (cl_wide_st_chg(mdev, os, ns)) {
580                 rv = is_valid_state(mdev, ns);
581                 if (rv == SS_SUCCESS)
582                         rv = is_valid_state_transition(mdev, ns, os);
583                 spin_unlock_irqrestore(&mdev->req_lock, flags);
584
585                 if (rv < SS_SUCCESS) {
586                         if (f & CS_VERBOSE)
587                                 print_st_err(mdev, os, ns, rv);
588                         goto abort;
589                 }
590
591                 drbd_state_lock(mdev);
592                 if (!drbd_send_state_req(mdev, mask, val)) {
593                         drbd_state_unlock(mdev);
594                         rv = SS_CW_FAILED_BY_PEER;
595                         if (f & CS_VERBOSE)
596                                 print_st_err(mdev, os, ns, rv);
597                         goto abort;
598                 }
599
600                 wait_event(mdev->state_wait,
601                         (rv = _req_st_cond(mdev, mask, val)));
602
603                 if (rv < SS_SUCCESS) {
604                         drbd_state_unlock(mdev);
605                         if (f & CS_VERBOSE)
606                                 print_st_err(mdev, os, ns, rv);
607                         goto abort;
608                 }
609                 spin_lock_irqsave(&mdev->req_lock, flags);
610                 os = mdev->state;
611                 ns.i = (os.i & ~mask.i) | val.i;
612                 rv = _drbd_set_state(mdev, ns, f, &done);
613                 drbd_state_unlock(mdev);
614         } else {
615                 rv = _drbd_set_state(mdev, ns, f, &done);
616         }
617
618         spin_unlock_irqrestore(&mdev->req_lock, flags);
619
620         if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
621                 D_ASSERT(current != mdev->worker.task);
622                 wait_for_completion(&done);
623         }
624
625 abort:
626         if (f & CS_SERIALIZE)
627                 mutex_unlock(&mdev->state_mutex);
628
629         return rv;
630 }
631
632 /**
633  * _drbd_request_state() - Request a state change (with flags)
634  * @mdev:       DRBD device.
635  * @mask:       mask of state bits to change.
636  * @val:        value of new state bits.
637  * @f:          flags
638  *
639  * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
640  * flag, or when logging of failed state change requests is not desired.
641  */
642 enum drbd_state_rv
643 _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
644                     union drbd_state val, enum chg_state_flags f)
645 {
646         enum drbd_state_rv rv;
647
648         wait_event(mdev->state_wait,
649                    (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
650
651         return rv;
652 }
653
654 static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
655 {
656         dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
657             name,
658             drbd_conn_str(ns.conn),
659             drbd_role_str(ns.role),
660             drbd_role_str(ns.peer),
661             drbd_disk_str(ns.disk),
662             drbd_disk_str(ns.pdsk),
663             is_susp(ns) ? 's' : 'r',
664             ns.aftr_isp ? 'a' : '-',
665             ns.peer_isp ? 'p' : '-',
666             ns.user_isp ? 'u' : '-'
667             );
668 }
669
670 void print_st_err(struct drbd_conf *mdev, union drbd_state os,
671                   union drbd_state ns, enum drbd_state_rv err)
672 {
673         if (err == SS_IN_TRANSIENT_STATE)
674                 return;
675         dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
676         print_st(mdev, " state", os);
677         print_st(mdev, "wanted", ns);
678 }
679
680
681 /**
682  * is_valid_state() - Returns an SS_ error code if ns is not valid
683  * @mdev:       DRBD device.
684  * @ns:         State to consider.
685  */
686 static enum drbd_state_rv
687 is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
688 {
689         /* See drbd_state_sw_errors in drbd_strings.c */
690
691         enum drbd_fencing_p fp;
692         enum drbd_state_rv rv = SS_SUCCESS;
693
694         fp = FP_DONT_CARE;
695         if (get_ldev(mdev)) {
696                 fp = mdev->ldev->dc.fencing;
697                 put_ldev(mdev);
698         }
699
700         if (get_net_conf(mdev)) {
701                 if (!mdev->net_conf->two_primaries &&
702                     ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
703                         rv = SS_TWO_PRIMARIES;
704                 put_net_conf(mdev);
705         }
706
707         if (rv <= 0)
708                 /* already found a reason to abort */;
709         else if (ns.role == R_SECONDARY && mdev->open_cnt)
710                 rv = SS_DEVICE_IN_USE;
711
712         else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
713                 rv = SS_NO_UP_TO_DATE_DISK;
714
715         else if (fp >= FP_RESOURCE &&
716                  ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
717                 rv = SS_PRIMARY_NOP;
718
719         else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
720                 rv = SS_NO_UP_TO_DATE_DISK;
721
722         else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
723                 rv = SS_NO_LOCAL_DISK;
724
725         else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
726                 rv = SS_NO_REMOTE_DISK;
727
728         else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
729                 rv = SS_NO_UP_TO_DATE_DISK;
730
731         else if ((ns.conn == C_CONNECTED ||
732                   ns.conn == C_WF_BITMAP_S ||
733                   ns.conn == C_SYNC_SOURCE ||
734                   ns.conn == C_PAUSED_SYNC_S) &&
735                   ns.disk == D_OUTDATED)
736                 rv = SS_CONNECTED_OUTDATES;
737
738         else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
739                  (mdev->sync_conf.verify_alg[0] == 0))
740                 rv = SS_NO_VERIFY_ALG;
741
742         else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
743                   mdev->agreed_pro_version < 88)
744                 rv = SS_NOT_SUPPORTED;
745
746         return rv;
747 }
748
749 /**
750  * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
751  * @mdev:       DRBD device.
752  * @ns:         new state.
753  * @os:         old state.
754  */
755 static enum drbd_state_rv
756 is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
757                           union drbd_state os)
758 {
759         enum drbd_state_rv rv = SS_SUCCESS;
760
761         if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
762             os.conn > C_CONNECTED)
763                 rv = SS_RESYNC_RUNNING;
764
765         if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
766                 rv = SS_ALREADY_STANDALONE;
767
768         if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
769                 rv = SS_IS_DISKLESS;
770
771         if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
772                 rv = SS_NO_NET_CONFIG;
773
774         if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
775                 rv = SS_LOWER_THAN_OUTDATED;
776
777         if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
778                 rv = SS_IN_TRANSIENT_STATE;
779
780         if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
781                 rv = SS_IN_TRANSIENT_STATE;
782
783         if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
784                 rv = SS_NEED_CONNECTION;
785
786         if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
787             ns.conn != os.conn && os.conn > C_CONNECTED)
788                 rv = SS_RESYNC_RUNNING;
789
790         if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
791             os.conn < C_CONNECTED)
792                 rv = SS_NEED_CONNECTION;
793
794         if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
795             && os.conn < C_WF_REPORT_PARAMS)
796                 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
797
798         return rv;
799 }
800
801 /**
802  * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
803  * @mdev:       DRBD device.
804  * @os:         old state.
805  * @ns:         new state.
806  * @warn_sync_abort:
807  *
808  * When we lose the connection, we have to set the state of the peer's disk (pdsk)
809  * to D_UNKNOWN. This rule and many more along those lines are in this function.
810  */
811 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
812                                        union drbd_state ns, const char **warn_sync_abort)
813 {
814         enum drbd_fencing_p fp;
815         enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
816
817         fp = FP_DONT_CARE;
818         if (get_ldev(mdev)) {
819                 fp = mdev->ldev->dc.fencing;
820                 put_ldev(mdev);
821         }
822
823         /* Do not allow network error states while the network part is not configured */
824         if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
825             os.conn <= C_DISCONNECTING)
826                 ns.conn = os.conn;
827
828         /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
829          * If you try to go into some Sync* state, that shall fail (elsewhere). */
830         if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
831             ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
832                 ns.conn = os.conn;
833
834         /* we cannot fail (again) if we already detached */
835         if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
836                 ns.disk = D_DISKLESS;
837
838         /* if we are only D_ATTACHING yet,
839          * we can (and should) go directly to D_DISKLESS. */
840         if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
841                 ns.disk = D_DISKLESS;
842
843         /* After C_DISCONNECTING only C_STANDALONE may follow */
844         if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
845                 ns.conn = os.conn;
846
847         if (ns.conn < C_CONNECTED) {
848                 ns.peer_isp = 0;
849                 ns.peer = R_UNKNOWN;
850                 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
851                         ns.pdsk = D_UNKNOWN;
852         }
853
854         /* Clear the aftr_isp when becoming unconfigured */
855         if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
856                 ns.aftr_isp = 0;
857
858         /* Abort resync if a disk fails/detaches */
859         if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
860             (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
861                 if (warn_sync_abort)
862                         *warn_sync_abort =
863                                 os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
864                                 "Online-verify" : "Resync";
865                 ns.conn = C_CONNECTED;
866         }
867
868         /* Connection breaks down before we finished "Negotiating" */
869         if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
870             get_ldev_if_state(mdev, D_NEGOTIATING)) {
871                 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
872                         ns.disk = mdev->new_state_tmp.disk;
873                         ns.pdsk = mdev->new_state_tmp.pdsk;
874                 } else {
875                         dev_alert(DEV, "Connection lost while negotiating, no data!\n");
876                         ns.disk = D_DISKLESS;
877                         ns.pdsk = D_UNKNOWN;
878                 }
879                 put_ldev(mdev);
880         }
881
882         /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
883         if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
884                 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
885                         ns.disk = D_UP_TO_DATE;
886                 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
887                         ns.pdsk = D_UP_TO_DATE;
888         }
889
890         /* Implications of the connection state on the disk states */
891         disk_min = D_DISKLESS;
892         disk_max = D_UP_TO_DATE;
893         pdsk_min = D_INCONSISTENT;
894         pdsk_max = D_UNKNOWN;
895         switch ((enum drbd_conns)ns.conn) {
896         case C_WF_BITMAP_T:
897         case C_PAUSED_SYNC_T:
898         case C_STARTING_SYNC_T:
899         case C_WF_SYNC_UUID:
900         case C_BEHIND:
901                 disk_min = D_INCONSISTENT;
902                 disk_max = D_OUTDATED;
903                 pdsk_min = D_UP_TO_DATE;
904                 pdsk_max = D_UP_TO_DATE;
905                 break;
906         case C_VERIFY_S:
907         case C_VERIFY_T:
908                 disk_min = D_UP_TO_DATE;
909                 disk_max = D_UP_TO_DATE;
910                 pdsk_min = D_UP_TO_DATE;
911                 pdsk_max = D_UP_TO_DATE;
912                 break;
913         case C_CONNECTED:
914                 disk_min = D_DISKLESS;
915                 disk_max = D_UP_TO_DATE;
916                 pdsk_min = D_DISKLESS;
917                 pdsk_max = D_UP_TO_DATE;
918                 break;
919         case C_WF_BITMAP_S:
920         case C_PAUSED_SYNC_S:
921         case C_STARTING_SYNC_S:
922         case C_AHEAD:
923                 disk_min = D_UP_TO_DATE;
924                 disk_max = D_UP_TO_DATE;
925                 pdsk_min = D_INCONSISTENT;
926                 pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
927                 break;
928         case C_SYNC_TARGET:
929                 disk_min = D_INCONSISTENT;
930                 disk_max = D_INCONSISTENT;
931                 pdsk_min = D_UP_TO_DATE;
932                 pdsk_max = D_UP_TO_DATE;
933                 break;
934         case C_SYNC_SOURCE:
935                 disk_min = D_UP_TO_DATE;
936                 disk_max = D_UP_TO_DATE;
937                 pdsk_min = D_INCONSISTENT;
938                 pdsk_max = D_INCONSISTENT;
939                 break;
940         case C_STANDALONE:
941         case C_DISCONNECTING:
942         case C_UNCONNECTED:
943         case C_TIMEOUT:
944         case C_BROKEN_PIPE:
945         case C_NETWORK_FAILURE:
946         case C_PROTOCOL_ERROR:
947         case C_TEAR_DOWN:
948         case C_WF_CONNECTION:
949         case C_WF_REPORT_PARAMS:
950         case C_MASK:
951                 break;
952         }
953         if (ns.disk > disk_max)
954                 ns.disk = disk_max;
955
956         if (ns.disk < disk_min) {
957                 dev_warn(DEV, "Implicitly set disk from %s to %s\n",
958                          drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
959                 ns.disk = disk_min;
960         }
961         if (ns.pdsk > pdsk_max)
962                 ns.pdsk = pdsk_max;
963
964         if (ns.pdsk < pdsk_min) {
965                 dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
966                          drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
967                 ns.pdsk = pdsk_min;
968         }
969
970         if (fp == FP_STONITH &&
971             (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
972             !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
973                 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
974
975         if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
976             (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
977             !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
978                 ns.susp_nod = 1; /* Suspend IO while no accessible data is available */
979
980         if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
981                 if (ns.conn == C_SYNC_SOURCE)
982                         ns.conn = C_PAUSED_SYNC_S;
983                 if (ns.conn == C_SYNC_TARGET)
984                         ns.conn = C_PAUSED_SYNC_T;
985         } else {
986                 if (ns.conn == C_PAUSED_SYNC_S)
987                         ns.conn = C_SYNC_SOURCE;
988                 if (ns.conn == C_PAUSED_SYNC_T)
989                         ns.conn = C_SYNC_TARGET;
990         }
991
992         return ns;
993 }
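/* The disk_min/disk_max and pdsk_min/pdsk_max clamping above restricts the
 * local and the peer disk state to the range that makes sense for the given
 * connection state; only values below the minimum trigger the "Implicitly
 * set ..." warnings, values above the maximum are clamped silently. */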
994
995 /* helper for __drbd_set_state */
996 static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
997 {
998         if (mdev->agreed_pro_version < 90)
999                 mdev->ov_start_sector = 0;
1000         mdev->rs_total = drbd_bm_bits(mdev);
1001         mdev->ov_position = 0;
1002         if (cs == C_VERIFY_T) {
1003                 /* starting online verify from an arbitrary position
1004                  * does not fit well into the existing protocol.
1005                  * on C_VERIFY_T, we initialize ov_left and friends
1006                  * implicitly in receive_DataRequest once the
1007                  * first P_OV_REQUEST is received */
1008                 mdev->ov_start_sector = ~(sector_t)0;
1009         } else {
1010                 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
1011                 if (bit >= mdev->rs_total) {
1012                         mdev->ov_start_sector =
1013                                 BM_BIT_TO_SECT(mdev->rs_total - 1);
1014                         mdev->rs_total = 1;
1015                 } else
1016                         mdev->rs_total -= bit;
1017                 mdev->ov_position = mdev->ov_start_sector;
1018         }
1019         mdev->ov_left = mdev->rs_total;
1020 }
1021
1022 static void drbd_resume_al(struct drbd_conf *mdev)
1023 {
1024         if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
1025                 dev_info(DEV, "Resumed AL updates\n");
1026 }
1027
1028 /**
1029  * __drbd_set_state() - Set a new DRBD state
1030  * @mdev:       DRBD device.
1031  * @ns:         new state.
1032  * @flags:      Flags
1033  * @done:       Optional completion that will be completed after after_state_ch() has finished
1034  *
1035  * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
1036  */
1037 enum drbd_state_rv
1038 __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1039                  enum chg_state_flags flags, struct completion *done)
1040 {
1041         union drbd_state os;
1042         enum drbd_state_rv rv = SS_SUCCESS;
1043         const char *warn_sync_abort = NULL;
1044         struct after_state_chg_work *ascw;
1045
1046         os = mdev->state;
1047
1048         ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
1049
1050         if (ns.i == os.i)
1051                 return SS_NOTHING_TO_DO;
1052
1053         if (!(flags & CS_HARD)) {
1054                 /*  pre-state-change checks ; only look at ns  */
1055                 /* See drbd_state_sw_errors in drbd_strings.c */
1056
1057                 rv = is_valid_state(mdev, ns);
1058                 if (rv < SS_SUCCESS) {
1059                         /* If the old state was illegal as well, then let
1060                            this happen...*/
1061
1062                         if (is_valid_state(mdev, os) == rv)
1063                                 rv = is_valid_state_transition(mdev, ns, os);
1064                 } else
1065                         rv = is_valid_state_transition(mdev, ns, os);
1066         }
1067
1068         if (rv < SS_SUCCESS) {
1069                 if (flags & CS_VERBOSE)
1070                         print_st_err(mdev, os, ns, rv);
1071                 return rv;
1072         }
1073
1074         if (warn_sync_abort)
1075                 dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
1076
1077         {
1078         char *pbp, pb[300];
1079         pbp = pb;
1080         *pbp = 0;
1081         if (ns.role != os.role)
1082                 pbp += sprintf(pbp, "role( %s -> %s ) ",
1083                                drbd_role_str(os.role),
1084                                drbd_role_str(ns.role));
1085         if (ns.peer != os.peer)
1086                 pbp += sprintf(pbp, "peer( %s -> %s ) ",
1087                                drbd_role_str(os.peer),
1088                                drbd_role_str(ns.peer));
1089         if (ns.conn != os.conn)
1090                 pbp += sprintf(pbp, "conn( %s -> %s ) ",
1091                                drbd_conn_str(os.conn),
1092                                drbd_conn_str(ns.conn));
1093         if (ns.disk != os.disk)
1094                 pbp += sprintf(pbp, "disk( %s -> %s ) ",
1095                                drbd_disk_str(os.disk),
1096                                drbd_disk_str(ns.disk));
1097         if (ns.pdsk != os.pdsk)
1098                 pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
1099                                drbd_disk_str(os.pdsk),
1100                                drbd_disk_str(ns.pdsk));
1101         if (is_susp(ns) != is_susp(os))
1102                 pbp += sprintf(pbp, "susp( %d -> %d ) ",
1103                                is_susp(os),
1104                                is_susp(ns));
1105         if (ns.aftr_isp != os.aftr_isp)
1106                 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
1107                                os.aftr_isp,
1108                                ns.aftr_isp);
1109         if (ns.peer_isp != os.peer_isp)
1110                 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
1111                                os.peer_isp,
1112                                ns.peer_isp);
1113         if (ns.user_isp != os.user_isp)
1114                 pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
1115                                os.user_isp,
1116                                ns.user_isp);
1117         dev_info(DEV, "%s\n", pb);
1118         }
1119
1120         /* solve the race between becoming unconfigured,
1121          * worker doing the cleanup, and
1122          * admin reconfiguring us:
1123          * on (re)configure, first set CONFIG_PENDING,
1124          * then wait for a potentially exiting worker,
1125          * start the worker, and schedule one no_op.
1126          * then proceed with configuration.
1127          */
1128         if (ns.disk == D_DISKLESS &&
1129             ns.conn == C_STANDALONE &&
1130             ns.role == R_SECONDARY &&
1131             !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1132                 set_bit(DEVICE_DYING, &mdev->flags);
1133
1134         /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1135          * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1136          * drbd_ldev_destroy() won't happen before our corresponding
1137          * after_state_ch works run, where we put_ldev again. */
1138         if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1139             (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1140                 atomic_inc(&mdev->local_cnt);
1141
1142         mdev->state = ns;
1143         wake_up(&mdev->misc_wait);
1144         wake_up(&mdev->state_wait);
1145
1146         /* aborted verify run. log the last position */
1147         if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1148             ns.conn < C_CONNECTED) {
1149                 mdev->ov_start_sector =
1150                         BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
1151                 dev_info(DEV, "Online Verify reached sector %llu\n",
1152                         (unsigned long long)mdev->ov_start_sector);
1153         }
1154
1155         if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1156             (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
1157                 dev_info(DEV, "Syncer continues.\n");
1158                 mdev->rs_paused += (long)jiffies
1159                                   -(long)mdev->rs_mark_time[mdev->rs_last_mark];
1160                 if (ns.conn == C_SYNC_TARGET)
1161                         mod_timer(&mdev->resync_timer, jiffies);
1162         }
1163
1164         if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
1165             (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1166                 dev_info(DEV, "Resync suspended\n");
1167                 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
1168         }
1169
1170         if (os.conn == C_CONNECTED &&
1171             (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1172                 unsigned long now = jiffies;
1173                 int i;
1174
1175                 set_ov_position(mdev, ns.conn);
1176                 mdev->rs_start = now;
1177                 mdev->rs_last_events = 0;
1178                 mdev->rs_last_sect_ev = 0;
1179                 mdev->ov_last_oos_size = 0;
1180                 mdev->ov_last_oos_start = 0;
1181
1182                 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1183                         mdev->rs_mark_left[i] = mdev->ov_left;
1184                         mdev->rs_mark_time[i] = now;
1185                 }
1186
1187                 drbd_rs_controller_reset(mdev);
1188
1189                 if (ns.conn == C_VERIFY_S) {
1190                         dev_info(DEV, "Starting Online Verify from sector %llu\n",
1191                                         (unsigned long long)mdev->ov_position);
1192                         mod_timer(&mdev->resync_timer, jiffies);
1193                 }
1194         }
1195
1196         if (get_ldev(mdev)) {
1197                 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1198                                                  MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1199                                                  MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1200
1201                 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1202                         mdf |= MDF_CRASHED_PRIMARY;
1203                 if (mdev->state.role == R_PRIMARY ||
1204                     (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1205                         mdf |= MDF_PRIMARY_IND;
1206                 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1207                         mdf |= MDF_CONNECTED_IND;
1208                 if (mdev->state.disk > D_INCONSISTENT)
1209                         mdf |= MDF_CONSISTENT;
1210                 if (mdev->state.disk > D_OUTDATED)
1211                         mdf |= MDF_WAS_UP_TO_DATE;
1212                 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1213                         mdf |= MDF_PEER_OUT_DATED;
1214                 if (mdf != mdev->ldev->md.flags) {
1215                         mdev->ldev->md.flags = mdf;
1216                         drbd_md_mark_dirty(mdev);
1217                 }
1218                 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1219                         drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1220                 put_ldev(mdev);
1221         }
1222
1223         /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider resync */
1224         if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1225             os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1226                 set_bit(CONSIDER_RESYNC, &mdev->flags);
1227
1228         /* Receiver should clean up itself */
1229         if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1230                 drbd_thread_stop_nowait(&mdev->receiver);
1231
1232         /* Now the receiver finished cleaning up itself, it should die */
1233         if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1234                 drbd_thread_stop_nowait(&mdev->receiver);
1235
1236         /* Upon network failure, we need to restart the receiver. */
1237         if (os.conn > C_TEAR_DOWN &&
1238             ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1239                 drbd_thread_restart_nowait(&mdev->receiver);
1240
1241         /* Resume AL writing if we get a connection */
1242         if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1243                 drbd_resume_al(mdev);
1244
1245         ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1246         if (ascw) {
1247                 ascw->os = os;
1248                 ascw->ns = ns;
1249                 ascw->flags = flags;
1250                 ascw->w.cb = w_after_state_ch;
1251                 ascw->done = done;
1252                 drbd_queue_work(&mdev->data.work, &ascw->w);
1253         } else {
1254                 dev_warn(DEV, "Could not kmalloc an ascw\n");
1255         }
1256
1257         return rv;
1258 }
1259
1260 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1261 {
1262         struct after_state_chg_work *ascw =
1263                 container_of(w, struct after_state_chg_work, w);
1264         after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1265         if (ascw->flags & CS_WAIT_COMPLETE) {
1266                 D_ASSERT(ascw->done != NULL);
1267                 complete(ascw->done);
1268         }
1269         kfree(ascw);
1270
1271         return 1;
1272 }
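/* Runs on the worker thread (the work item is queued in __drbd_set_state()),
 * so the potentially sleeping after_state_ch() work is serialized with the
 * other worker callbacks. */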
1273
1274 static void abw_start_sync(struct drbd_conf *mdev, int rv)
1275 {
1276         if (rv) {
1277                 dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
1278                 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1279                 return;
1280         }
1281
1282         switch (mdev->state.conn) {
1283         case C_STARTING_SYNC_T:
1284                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1285                 break;
1286         case C_STARTING_SYNC_S:
1287                 drbd_start_resync(mdev, C_SYNC_SOURCE);
1288                 break;
1289         }
1290 }
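/* "abw" apparently stands for "after bitmap write"; this is presumably used
 * as the completion callback of drbd_queue_bitmap_io() when a full bitmap
 * write precedes actually starting the resync from C_STARTING_SYNC_S/_T
 * (handled further down in after_state_ch(), outside this excerpt). */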
1291
1292 int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
1293 {
1294         int rv;
1295
1296         D_ASSERT(current == mdev->worker.task);
1297
1298         /* open coded non-blocking drbd_suspend_io(mdev); */
1299         set_bit(SUSPEND_IO, &mdev->flags);
1300         if (!is_susp(mdev->state))
1301                 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
1302
1303         drbd_bm_lock(mdev, why);
1304         rv = io_fn(mdev);
1305         drbd_bm_unlock(mdev);
1306
1307         drbd_resume_io(mdev);
1308
1309         return rv;
1310 }
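/* Used from after_state_ch() (i.e. worker context) to write out the bitmap
 * synchronously, e.g. when demoting from primary to secondary, with
 * application IO suspended and the bitmap locked for the duration of the
 * write. */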
1311
1312 /**
1313  * after_state_ch() - Perform after state change actions that may sleep
1314  * @mdev:       DRBD device.
1315  * @os:         old state.
1316  * @ns:         new state.
1317  * @flags:      Flags
1318  */
1319 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1320                            union drbd_state ns, enum chg_state_flags flags)
1321 {
1322         enum drbd_fencing_p fp;
1323         enum drbd_req_event what = nothing;
1324         union drbd_state nsm = (union drbd_state){ .i = -1 };
1325
1326         if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1327                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1328                 if (mdev->p_uuid)
1329                         mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1330         }
1331
1332         fp = FP_DONT_CARE;
1333         if (get_ldev(mdev)) {
1334                 fp = mdev->ldev->dc.fencing;
1335                 put_ldev(mdev);
1336         }
1337
1338         /* Inform userspace about the change... */
1339         drbd_bcast_state(mdev, ns);
1340
1341         if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1342             (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1343                 drbd_khelper(mdev, "pri-on-incon-degr");
1344
1345         /* Here we have the actions that are performed after a
1346            state change. This function might sleep */
1347
1348         nsm.i = -1;
1349         if (ns.susp_nod) {
1350                 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1351                         if (ns.conn == C_CONNECTED)
1352                                 what = resend, nsm.susp_nod = 0;
1353                         else /* ns.conn > C_CONNECTED */
1354                                 dev_err(DEV, "Unexpected Resync going on!\n");
1355                 }
1356
1357                 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
1358                         what = restart_frozen_disk_io, nsm.susp_nod = 0;
1359
1360         }
1361
1362         if (ns.susp_fen) {
1363                 /* case1: The outdate peer handler is successful: */
1364                 if (os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) {
1365                         tl_clear(mdev);
1366                         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1367                                 drbd_uuid_new_current(mdev);
1368                                 clear_bit(NEW_CUR_UUID, &mdev->flags);
1369                         }
1370                         spin_lock_irq(&mdev->req_lock);
1371                         _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1372                         spin_unlock_irq(&mdev->req_lock);
1373                 }
1374                 /* case2: The connection was established again: */
1375                 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1376                         clear_bit(NEW_CUR_UUID, &mdev->flags);
1377                         what = resend;
1378                         nsm.susp_fen = 0;
1379                 }
1380         }
1381
1382         if (what != nothing) {
1383                 spin_lock_irq(&mdev->req_lock);
1384                 _tl_restart(mdev, what);
1385                 nsm.i &= mdev->state.i;
1386                 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
1387                 spin_unlock_irq(&mdev->req_lock);
1388         }
1389
1390         /* Do not change the order of the if above and the two below... */
1391         if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
1392                 drbd_send_uuids(mdev);
1393                 drbd_send_state(mdev);
1394         }
1395         if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
1396                 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");
1397
1398         /* Lost contact to peer's copy of the data */
1399         if ((os.pdsk >= D_INCONSISTENT &&
1400              os.pdsk != D_UNKNOWN &&
1401              os.pdsk != D_OUTDATED)
1402         &&  (ns.pdsk < D_INCONSISTENT ||
1403              ns.pdsk == D_UNKNOWN ||
1404              ns.pdsk == D_OUTDATED)) {
1405                 if (get_ldev(mdev)) {
1406                         if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1407                             mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1408                                 if (is_susp(mdev->state)) {
1409                                         set_bit(NEW_CUR_UUID, &mdev->flags);
1410                                 } else {
1411                                         drbd_uuid_new_current(mdev);
1412                                         drbd_send_uuids(mdev);
1413                                 }
1414                         }
1415                         put_ldev(mdev);
1416                 }
1417         }
1418
1419         if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1420                 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
1421                         drbd_uuid_new_current(mdev);
1422                         drbd_send_uuids(mdev);
1423                 }
1424
1425                 /* D_DISKLESS Peer becomes secondary */
1426                 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1427                         drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote diskless peer");
1428                 put_ldev(mdev);
1429         }
1430
1431         if (os.role == R_PRIMARY && ns.role == R_SECONDARY && get_ldev(mdev)) {
1432                 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote");
1433                 put_ldev(mdev);
1434         }
1435
1436         /* Last part of the attaching process ... */
1437         if (ns.conn >= C_CONNECTED &&
1438             os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
1439                 drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
1440                 drbd_send_uuids(mdev);
1441                 drbd_send_state(mdev);
1442         }
1443
1444         /* We want to pause/continue resync, tell peer. */
1445         if (ns.conn >= C_CONNECTED &&
1446              ((os.aftr_isp != ns.aftr_isp) ||
1447               (os.user_isp != ns.user_isp)))
1448                 drbd_send_state(mdev);
1449
1450         /* In case one of the isp bits got set, suspend other devices. */
1451         if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1452             (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1453                 suspend_other_sg(mdev);
1454
1455         /* Make sure the peer gets informed about possible state
1456            changes (ISP bits) while we were in WFReportParams. */
1457         if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1458                 drbd_send_state(mdev);
1459
1460         if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1461                 drbd_send_state(mdev);
1462
1463         /* We are in the process of starting a full sync... */
1464         if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1465             (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1466                 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");
1467
1468         /* We are invalidating ourselves... */
1469         if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1470             os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1471                 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
1472
1473         /* first half of local IO error, failure to attach,
1474          * or administrative detach */
1475         if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1476                 enum drbd_io_error_p eh;
1477                 int was_io_error;
1478                 /* corresponding get_ldev was in __drbd_set_state, to serialize
1479                  * our cleanup here with the transition to D_DISKLESS,
1480          * so it is safe to dereference ldev here. */
1481                 eh = mdev->ldev->dc.on_io_error;
1482                 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1483
1484                 /* current state still has to be D_FAILED,
1485                  * there is only one way out: to D_DISKLESS,
1486                  * and that may only happen after our put_ldev below. */
1487                 if (mdev->state.disk != D_FAILED)
1488                         dev_err(DEV,
1489                                 "ASSERT FAILED: disk is %s during detach\n",
1490                                 drbd_disk_str(mdev->state.disk));
1491
1492                 if (drbd_send_state(mdev))
1493                         dev_warn(DEV, "Notified peer that I am detaching my disk\n");
1494                 else
1495                         dev_err(DEV, "Sending state for detaching disk failed\n");
1496
1497                 drbd_rs_cancel_all(mdev);
1498
1499                 /* In case we want to get something to stable storage still,
1500                  * this may be the last chance.
1501                  * Following put_ldev may transition to D_DISKLESS. */
1502                 drbd_md_sync(mdev);
1503                 put_ldev(mdev);
1504
1505                 if (was_io_error && eh == EP_CALL_HELPER)
1506                         drbd_khelper(mdev, "local-io-error");
1507         }
1508
1509         /* second half of local IO error, failure to attach,
1510          * or administrative detach,
1511          * after local_cnt references have reached zero again */
1512         if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1513                 /* We must still be diskless,
1514                  * re-attach has to be serialized with this! */
1515                 if (mdev->state.disk != D_DISKLESS)
1516                         dev_err(DEV,
1517                                 "ASSERT FAILED: disk is %s while going diskless\n",
1518                                 drbd_disk_str(mdev->state.disk));
1519
1520                 mdev->rs_total = 0;
1521                 mdev->rs_failed = 0;
1522                 atomic_set(&mdev->rs_pending_cnt, 0);
1523
1524                 if (drbd_send_state(mdev))
1525                         dev_warn(DEV, "Notified peer that I'm now diskless.\n");
1526                 else
1527                         dev_err(DEV, "Sending state for being diskless failed\n");
1528                 /* corresponding get_ldev in __drbd_set_state
1529          * this may finally trigger drbd_ldev_destroy. */
1530                 put_ldev(mdev);
1531         }
1532
1533         /* Disks got bigger while they were detached */
1534         if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1535             test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1536                 if (ns.conn == C_CONNECTED)
1537                         resync_after_online_grow(mdev);
1538         }
1539
1540         /* A resync finished or aborted, wake paused devices... */
1541         if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1542             (os.peer_isp && !ns.peer_isp) ||
1543             (os.user_isp && !ns.user_isp))
1544                 resume_next_sg(mdev);
1545
1546         /* sync target done with resync.  Explicitly notify peer, even though
1547          * it should (at least for non-empty resyncs) already know itself. */
1548         if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1549                 drbd_send_state(mdev);
1550
1551         if (os.conn > C_CONNECTED && ns.conn == C_CONNECTED)
1552                 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
1553
1554         /* free tl_hash if we got thawed and are C_STANDALONE */
1555         if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
1556                 drbd_free_tl_hash(mdev);
1557
1558         /* Upon network connection, we need to start the receiver */
1559         if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1560                 drbd_thread_start(&mdev->receiver);
1561
1562         /* Terminate worker thread if we are unconfigured - it will be
1563            restarted as needed... */
1564         if (ns.disk == D_DISKLESS &&
1565             ns.conn == C_STANDALONE &&
1566             ns.role == R_SECONDARY) {
1567                 if (os.aftr_isp != ns.aftr_isp)
1568                         resume_next_sg(mdev);
1569                 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1570                 if (test_bit(DEVICE_DYING, &mdev->flags))
1571                         drbd_thread_stop_nowait(&mdev->worker);
1572         }
1573
1574         drbd_md_sync(mdev);
1575 }
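/*
 * Illustrative note, not part of the driver: every action in
 * after_state_ch() above is keyed off an old-state/new-state comparison,
 * so a side effect fires exactly once on the transition edge instead of
 * on every call while the condition holds.  A minimal sketch of that
 * idiom, with hypothetical helpers standing in for the real actions:
 *
 *	static void sketch_after_state_ch(union drbd_state os, union drbd_state ns)
 *	{
 *		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
 *			my_on_connect();	/* fires once when we become connected */
 *		if (os.role == R_PRIMARY && ns.role == R_SECONDARY)
 *			my_on_demote();		/* fires once on demotion */
 *	}
 */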
1576
1577
1578 static int drbd_thread_setup(void *arg)
1579 {
1580         struct drbd_thread *thi = (struct drbd_thread *) arg;
1581         struct drbd_conf *mdev = thi->mdev;
1582         unsigned long flags;
1583         int retval;
1584
1585 restart:
1586         retval = thi->function(thi);
1587
1588         spin_lock_irqsave(&thi->t_lock, flags);
1589
1590         /* if the receiver has been "Exiting", the last thing it did
1591          * was set the conn state to "StandAlone",
1592          * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1593          * and receiver thread will be "started".
1594          * drbd_thread_start needs to set "Restarting" in that case.
1595          * t_state check and assignment needs to be within the same spinlock,
1596          * so either thread_start sees Exiting, and can remap to Restarting,
1597          * or thread_start sees None, and can proceed as normal.
1598          */
1599
1600         if (thi->t_state == Restarting) {
1601                 dev_info(DEV, "Restarting %s\n", current->comm);
1602                 thi->t_state = Running;
1603                 spin_unlock_irqrestore(&thi->t_lock, flags);
1604                 goto restart;
1605         }
1606
1607         thi->task = NULL;
1608         thi->t_state = None;
1609         smp_mb();
1610         complete(&thi->stop);
1611         spin_unlock_irqrestore(&thi->t_lock, flags);
1612
1613         dev_info(DEV, "Terminating %s\n", current->comm);
1614
1615         /* Release mod reference taken when thread was started */
1616         module_put(THIS_MODULE);
1617         return retval;
1618 }
1619
1620 static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1621                       int (*func) (struct drbd_thread *))
1622 {
1623         spin_lock_init(&thi->t_lock);
1624         thi->task    = NULL;
1625         thi->t_state = None;
1626         thi->function = func;
1627         thi->mdev = mdev;
1628 }
1629
1630 int drbd_thread_start(struct drbd_thread *thi)
1631 {
1632         struct drbd_conf *mdev = thi->mdev;
1633         struct task_struct *nt;
1634         unsigned long flags;
1635
1636         const char *me =
1637                 thi == &mdev->receiver ? "receiver" :
1638                 thi == &mdev->asender  ? "asender"  :
1639                 thi == &mdev->worker   ? "worker"   : "NONSENSE";
1640
1641         /* is used from state engine doing drbd_thread_stop_nowait,
1642          * while holding the req lock irqsave */
1643         spin_lock_irqsave(&thi->t_lock, flags);
1644
1645         switch (thi->t_state) {
1646         case None:
1647                 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1648                                 me, current->comm, current->pid);
1649
1650                 /* Get ref on module for thread - this is released when thread exits */
1651                 if (!try_module_get(THIS_MODULE)) {
1652                         dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1653                         spin_unlock_irqrestore(&thi->t_lock, flags);
1654                         return false;
1655                 }
1656
1657                 init_completion(&thi->stop);
1658                 D_ASSERT(thi->task == NULL);
1659                 thi->reset_cpu_mask = 1;
1660                 thi->t_state = Running;
1661                 spin_unlock_irqrestore(&thi->t_lock, flags);
1662                 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
1663
1664                 nt = kthread_create(drbd_thread_setup, (void *) thi,
1665                                     "drbd%d_%s", mdev_to_minor(mdev), me);
1666
1667                 if (IS_ERR(nt)) {
1668                         dev_err(DEV, "Couldn't start thread\n");
1669
1670                         module_put(THIS_MODULE);
1671                         return false;
1672                 }
1673                 spin_lock_irqsave(&thi->t_lock, flags);
1674                 thi->task = nt;
1675                 thi->t_state = Running;
1676                 spin_unlock_irqrestore(&thi->t_lock, flags);
1677                 wake_up_process(nt);
1678                 break;
1679         case Exiting:
1680                 thi->t_state = Restarting;
1681                 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1682                                 me, current->comm, current->pid);
1683                 /* fall through */
1684         case Running:
1685         case Restarting:
1686         default:
1687                 spin_unlock_irqrestore(&thi->t_lock, flags);
1688                 break;
1689         }
1690
1691         return true;
1692 }
1693
1694
1695 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1696 {
1697         unsigned long flags;
1698
1699         enum drbd_thread_state ns = restart ? Restarting : Exiting;
1700
1701         /* may be called from state engine, holding the req lock irqsave */
1702         spin_lock_irqsave(&thi->t_lock, flags);
1703
1704         if (thi->t_state == None) {
1705                 spin_unlock_irqrestore(&thi->t_lock, flags);
1706                 if (restart)
1707                         drbd_thread_start(thi);
1708                 return;
1709         }
1710
1711         if (thi->t_state != ns) {
1712                 if (thi->task == NULL) {
1713                         spin_unlock_irqrestore(&thi->t_lock, flags);
1714                         return;
1715                 }
1716
1717                 thi->t_state = ns;
1718                 smp_mb();
1719                 init_completion(&thi->stop);
1720                 if (thi->task != current)
1721                         force_sig(DRBD_SIGKILL, thi->task);
1722
1723         }
1724
1725         spin_unlock_irqrestore(&thi->t_lock, flags);
1726
1727         if (wait)
1728                 wait_for_completion(&thi->stop);
1729 }
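/*
 * Illustrative sketch, not part of the driver: typical lifecycle of a
 * drbd_thread using the helpers above.  my_thread_fn() is a hypothetical
 * thread function; the real ones are the receiver, worker and asender.
 *
 *	drbd_thread_init(mdev, &mdev->receiver, my_thread_fn);
 *	drbd_thread_start(&mdev->receiver);	  /* None -> Running, spawns kthread */
 *	...
 *	_drbd_thread_stop(&mdev->receiver, 0, 1); /* -> Exiting, wait for completion */
 *
 * If drbd_thread_start() finds the thread in Exiting it flips it to
 * Restarting instead, and drbd_thread_setup() then loops back into the
 * thread function without creating a new task.
 */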
1730
1731 #ifdef CONFIG_SMP
1732 /**
1733  * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1734  * @mdev:       DRBD device.
1735  *
1736  * Forces all threads of a device onto the same CPU. This is beneficial for
1737  * DRBD's performance. May be overwritten by user's configuration.
1738  */
1739 void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1740 {
1741         int ord, cpu;
1742
1743         /* user override. */
1744         if (cpumask_weight(mdev->cpu_mask))
1745                 return;
1746
1747         ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1748         for_each_online_cpu(cpu) {
1749                 if (ord-- == 0) {
1750                         cpumask_set_cpu(cpu, mdev->cpu_mask);
1751                         return;
1752                 }
1753         }
1754         /* should not be reached */
1755         cpumask_setall(mdev->cpu_mask);
1756 }
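/*
 * Example, for illustration only: with four online CPUs the modulo above
 * spreads minors round robin, so all threads of minor 0 get pinned to the
 * first online CPU, minor 1 to the second, minor 4 back to the first, and
 * so on.  A non-empty user supplied cpu_mask bypasses this calculation.
 */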
1757
1758 /**
1759  * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1760  * @mdev:       DRBD device.
1761  *
1762  * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
1763  * prematurely.
1764  */
1765 void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1766 {
1767         struct task_struct *p = current;
1768         struct drbd_thread *thi =
1769                 p == mdev->asender.task  ? &mdev->asender  :
1770                 p == mdev->receiver.task ? &mdev->receiver :
1771                 p == mdev->worker.task   ? &mdev->worker   :
1772                 NULL;
1773         ERR_IF(thi == NULL)
1774                 return;
1775         if (!thi->reset_cpu_mask)
1776                 return;
1777         thi->reset_cpu_mask = 0;
1778         set_cpus_allowed_ptr(p, mdev->cpu_mask);
1779 }
1780 #endif
1781
1782 /* the appropriate socket mutex must be held already */
1783 int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1784                           enum drbd_packets cmd, struct p_header80 *h,
1785                           size_t size, unsigned msg_flags)
1786 {
1787         int sent, ok;
1788
1789         ERR_IF(!h) return false;
1790         ERR_IF(!size) return false;
1791
1792         h->magic   = BE_DRBD_MAGIC;
1793         h->command = cpu_to_be16(cmd);
1794         h->length  = cpu_to_be16(size-sizeof(struct p_header80));
1795
1796         sent = drbd_send(mdev, sock, h, size, msg_flags);
1797
1798         ok = (sent == size);
1799         if (!ok)
1800                 dev_err(DEV, "short sent %s size=%d sent=%d\n",
1801                     cmdname(cmd), (int)size, sent);
1802         return ok;
1803 }
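/*
 * Wire format sketch (informal, derived from the helper above): every
 * packet sent through _drbd_send_cmd() starts with a p_header80 whose
 * command and length are big endian, and whose length counts only the
 * payload following the header.  For a command struct p:
 *
 *	h->magic   = BE_DRBD_MAGIC;
 *	h->command = cpu_to_be16(P_STATE);	/* example command */
 *	h->length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header80));
 *
 * Callers that bypass this helper, such as drbd_send_cmd2() and
 * drbd_send_drequest_csum() below, build the same header by hand.
 */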
1804
1805 /* don't pass the socket. we may only look at it
1806  * when we hold the appropriate socket mutex.
1807  */
1808 int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1809                   enum drbd_packets cmd, struct p_header80 *h, size_t size)
1810 {
1811         int ok = 0;
1812         struct socket *sock;
1813
1814         if (use_data_socket) {
1815                 mutex_lock(&mdev->data.mutex);
1816                 sock = mdev->data.socket;
1817         } else {
1818                 mutex_lock(&mdev->meta.mutex);
1819                 sock = mdev->meta.socket;
1820         }
1821
1822         /* drbd_disconnect() could have called drbd_free_sock()
1823          * while we were waiting in down()... */
1824         if (likely(sock != NULL))
1825                 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1826
1827         if (use_data_socket)
1828                 mutex_unlock(&mdev->data.mutex);
1829         else
1830                 mutex_unlock(&mdev->meta.mutex);
1831         return ok;
1832 }
1833
1834 int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1835                    size_t size)
1836 {
1837         struct p_header80 h;
1838         int ok;
1839
1840         h.magic   = BE_DRBD_MAGIC;
1841         h.command = cpu_to_be16(cmd);
1842         h.length  = cpu_to_be16(size);
1843
1844         if (!drbd_get_data_sock(mdev))
1845                 return 0;
1846
1847         ok = (sizeof(h) ==
1848                 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1849         ok = ok && (size ==
1850                 drbd_send(mdev, mdev->data.socket, data, size, 0));
1851
1852         drbd_put_data_sock(mdev);
1853
1854         return ok;
1855 }
1856
1857 int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1858 {
1859         struct p_rs_param_95 *p;
1860         struct socket *sock;
1861         int size, rv;
1862         const int apv = mdev->agreed_pro_version;
1863
1864         size = apv <= 87 ? sizeof(struct p_rs_param)
1865                 : apv == 88 ? sizeof(struct p_rs_param)
1866                         + strlen(mdev->sync_conf.verify_alg) + 1
1867                 : apv <= 94 ? sizeof(struct p_rs_param_89)
1868                 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
1869
1870         /* used from admin command context and receiver/worker context.
1871          * to avoid kmalloc, grab the socket right here,
1872          * then use the pre-allocated sbuf there */
1873         mutex_lock(&mdev->data.mutex);
1874         sock = mdev->data.socket;
1875
1876         if (likely(sock != NULL)) {
1877                 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1878
1879                 p = &mdev->data.sbuf.rs_param_95;
1880
1881                 /* initialize verify_alg and csums_alg */
1882                 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1883
1884                 p->rate = cpu_to_be32(sc->rate);
1885                 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
1886                 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
1887                 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
1888                 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
1889
1890                 if (apv >= 88)
1891                         strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1892                 if (apv >= 89)
1893                         strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1894
1895                 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1896         } else
1897                 rv = 0; /* not ok */
1898
1899         mutex_unlock(&mdev->data.mutex);
1900
1901         return rv;
1902 }
1903
1904 int drbd_send_protocol(struct drbd_conf *mdev)
1905 {
1906         struct p_protocol *p;
1907         int size, cf, rv;
1908
1909         size = sizeof(struct p_protocol);
1910
1911         if (mdev->agreed_pro_version >= 87)
1912                 size += strlen(mdev->net_conf->integrity_alg) + 1;
1913
1914         /* we must not recurse into our own queue,
1915          * as that is blocked during handshake */
1916         p = kmalloc(size, GFP_NOIO);
1917         if (p == NULL)
1918                 return 0;
1919
1920         p->protocol      = cpu_to_be32(mdev->net_conf->wire_protocol);
1921         p->after_sb_0p   = cpu_to_be32(mdev->net_conf->after_sb_0p);
1922         p->after_sb_1p   = cpu_to_be32(mdev->net_conf->after_sb_1p);
1923         p->after_sb_2p   = cpu_to_be32(mdev->net_conf->after_sb_2p);
1924         p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1925
1926         cf = 0;
1927         if (mdev->net_conf->want_lose)
1928                 cf |= CF_WANT_LOSE;
1929         if (mdev->net_conf->dry_run) {
1930                 if (mdev->agreed_pro_version >= 92)
1931                         cf |= CF_DRY_RUN;
1932                 else {
1933                         dev_err(DEV, "--dry-run is not supported by peer");
1934                         kfree(p);
1935                         return 0;
1936                 }
1937         }
1938         p->conn_flags    = cpu_to_be32(cf);
1939
1940         if (mdev->agreed_pro_version >= 87)
1941                 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1942
1943         rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
1944                            (struct p_header80 *)p, size);
1945         kfree(p);
1946         return rv;
1947 }
1948
1949 int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
1950 {
1951         struct p_uuids p;
1952         int i;
1953
1954         if (!get_ldev_if_state(mdev, D_NEGOTIATING))
1955                 return 1;
1956
1957         for (i = UI_CURRENT; i < UI_SIZE; i++)
1958                 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
1959
1960         mdev->comm_bm_set = drbd_bm_total_weight(mdev);
1961         p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
1962         uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
1963         uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
1964         uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
1965         p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
1966
1967         put_ldev(mdev);
1968
1969         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
1970                              (struct p_header80 *)&p, sizeof(p));
1971 }
1972
1973 int drbd_send_uuids(struct drbd_conf *mdev)
1974 {
1975         return _drbd_send_uuids(mdev, 0);
1976 }
1977
1978 int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
1979 {
1980         return _drbd_send_uuids(mdev, 8);
1981 }
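/*
 * Informal summary of the uuid_flags bits as used above, derived from
 * _drbd_send_uuids() and its callers:
 *
 *	1  mirrors net_conf->want_lose on this node
 *	2  this node is a crashed primary (CRASHED_PRIMARY is set)
 *	4  new_state_tmp.disk was D_INCONSISTENT
 *	8  request to skip the initial sync
 *	   (drbd_send_uuids_skip_initial_sync())
 */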
1982
1983
1984 int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
1985 {
1986         struct p_rs_uuid p;
1987
1988         p.uuid = cpu_to_be64(val);
1989
1990         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
1991                              (struct p_header80 *)&p, sizeof(p));
1992 }
1993
1994 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
1995 {
1996         struct p_sizes p;
1997         sector_t d_size, u_size;
1998         int q_order_type;
1999         int ok;
2000
2001         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2002                 D_ASSERT(mdev->ldev->backing_bdev);
2003                 d_size = drbd_get_max_capacity(mdev->ldev);
2004                 u_size = mdev->ldev->dc.disk_size;
2005                 q_order_type = drbd_queue_order_type(mdev);
2006                 put_ldev(mdev);
2007         } else {
2008                 d_size = 0;
2009                 u_size = 0;
2010                 q_order_type = QUEUE_ORDERED_NONE;
2011         }
2012
2013         p.d_size = cpu_to_be64(d_size);
2014         p.u_size = cpu_to_be64(u_size);
2015         p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
2016         p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9);
2017         p.queue_order_type = cpu_to_be16(q_order_type);
2018         p.dds_flags = cpu_to_be16(flags);
2019
2020         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
2021                            (struct p_header80 *)&p, sizeof(p));
2022         return ok;
2023 }
2024
2025 /**
2026  * drbd_send_state() - Sends the drbd state to the peer
2027  * @mdev:       DRBD device.
2028  */
2029 int drbd_send_state(struct drbd_conf *mdev)
2030 {
2031         struct socket *sock;
2032         struct p_state p;
2033         int ok = 0;
2034
2035         /* Grab state lock so we won't send state if we're in the middle
2036          * of a cluster wide state change on another thread */
2037         drbd_state_lock(mdev);
2038
2039         mutex_lock(&mdev->data.mutex);
2040
2041         p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2042         sock = mdev->data.socket;
2043
2044         if (likely(sock != NULL)) {
2045                 ok = _drbd_send_cmd(mdev, sock, P_STATE,
2046                                     (struct p_header80 *)&p, sizeof(p), 0);
2047         }
2048
2049         mutex_unlock(&mdev->data.mutex);
2050
2051         drbd_state_unlock(mdev);
2052         return ok;
2053 }
2054
2055 int drbd_send_state_req(struct drbd_conf *mdev,
2056         union drbd_state mask, union drbd_state val)
2057 {
2058         struct p_req_state p;
2059
2060         p.mask    = cpu_to_be32(mask.i);
2061         p.val     = cpu_to_be32(val.i);
2062
2063         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
2064                              (struct p_header80 *)&p, sizeof(p));
2065 }
2066
2067 int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
2068 {
2069         struct p_req_state_reply p;
2070
2071         p.retcode    = cpu_to_be32(retcode);
2072
2073         return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
2074                              (struct p_header80 *)&p, sizeof(p));
2075 }
2076
2077 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2078         struct p_compressed_bm *p,
2079         struct bm_xfer_ctx *c)
2080 {
2081         struct bitstream bs;
2082         unsigned long plain_bits;
2083         unsigned long tmp;
2084         unsigned long rl;
2085         unsigned len;
2086         unsigned toggle;
2087         int bits;
2088
2089         /* may we use this feature? */
2090         if ((mdev->sync_conf.use_rle == 0) ||
2091                 (mdev->agreed_pro_version < 90))
2092                         return 0;
2093
2094         if (c->bit_offset >= c->bm_bits)
2095                 return 0; /* nothing to do. */
2096
2097         /* use at most this many bytes */
2098         bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2099         memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2100         /* plain bits covered in this code string */
2101         plain_bits = 0;
2102
2103         /* p->encoding & 0x80 stores whether the first run length is set.
2104          * bit offset is implicit.
2105          * start with toggle == 2 to be able to tell the first iteration */
2106         toggle = 2;
2107
2108         /* see how many plain bits we can stuff into one packet
2109          * using RLE and VLI. */
2110         do {
2111                 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2112                                     : _drbd_bm_find_next(mdev, c->bit_offset);
2113                 if (tmp == -1UL)
2114                         tmp = c->bm_bits;
2115                 rl = tmp - c->bit_offset;
2116
2117                 if (toggle == 2) { /* first iteration */
2118                         if (rl == 0) {
2119                                 /* the first checked bit was set,
2120                                  * store start value, */
2121                                 DCBP_set_start(p, 1);
2122                                 /* but skip encoding of zero run length */
2123                                 toggle = !toggle;
2124                                 continue;
2125                         }
2126                         DCBP_set_start(p, 0);
2127                 }
2128
2129                 /* paranoia: catch zero runlength.
2130                  * can only happen if bitmap is modified while we scan it. */
2131                 if (rl == 0) {
2132                         dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2133                             "t:%u bo:%lu\n", toggle, c->bit_offset);
2134                         return -1;
2135                 }
2136
2137                 bits = vli_encode_bits(&bs, rl);
2138                 if (bits == -ENOBUFS) /* buffer full */
2139                         break;
2140                 if (bits <= 0) {
2141                         dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2142                         return 0;
2143                 }
2144
2145                 toggle = !toggle;
2146                 plain_bits += rl;
2147                 c->bit_offset = tmp;
2148         } while (c->bit_offset < c->bm_bits);
2149
2150         len = bs.cur.b - p->code + !!bs.cur.bit;
2151
2152         if (plain_bits < (len << 3)) {
2153                 /* incompressible with this method.
2154                  * we need to rewind both word and bit position. */
2155                 c->bit_offset -= plain_bits;
2156                 bm_xfer_ctx_bit_to_word_offset(c);
2157                 c->bit_offset = c->word_offset * BITS_PER_LONG;
2158                 return 0;
2159         }
2160
2161         /* RLE + VLI was able to compress it just fine.
2162          * update c->word_offset. */
2163         bm_xfer_ctx_bit_to_word_offset(c);
2164
2165         /* store pad_bits */
2166         DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2167
2168         return len;
2169 }
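/*
 * Worked example, for illustration: if the bitmap starts 0 0 0 1 1 1 1 0 ...
 * the scan above sees a first run of 3 clear bits, records
 * DCBP_set_start(p, 0), and VLI-encodes the run lengths 3, 4, ... in turn;
 * whether a run means set or clear bits is implied purely by the
 * alternating toggle.  Had the first bit been set, DCBP_set_start(p, 1)
 * would be stored and the zero-length first run skipped.  The final
 * plain_bits < (len << 3) test rejects the encoding whenever it does not
 * beat sending the same range as plain words.
 */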
2170
2171 /**
2172  * send_bitmap_rle_or_plain
2173  *
2174  * Return 0 when done, 1 when another iteration is needed, and a negative error
2175  * code upon failure.
2176  */
2177 static int
2178 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
2179                          struct p_header80 *h, struct bm_xfer_ctx *c)
2180 {
2181         struct p_compressed_bm *p = (void*)h;
2182         unsigned long num_words;
2183         int len;
2184         int ok;
2185
2186         len = fill_bitmap_rle_bits(mdev, p, c);
2187
2188         if (len < 0)
2189                 return -EIO;
2190
2191         if (len) {
2192                 DCBP_set_code(p, RLE_VLI_Bits);
2193                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2194                         sizeof(*p) + len, 0);
2195
2196                 c->packets[0]++;
2197                 c->bytes[0] += sizeof(*p) + len;
2198
2199                 if (c->bit_offset >= c->bm_bits)
2200                         len = 0; /* DONE */
2201         } else {
2202                 /* was not compressible.
2203                  * send a buffer full of plain text bits instead. */
2204                 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2205                 len = num_words * sizeof(long);
2206                 if (len)
2207                         drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2208                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
2209                                    h, sizeof(struct p_header80) + len, 0);
2210                 c->word_offset += num_words;
2211                 c->bit_offset = c->word_offset * BITS_PER_LONG;
2212
2213                 c->packets[1]++;
2214                 c->bytes[1] += sizeof(struct p_header80) + len;
2215
2216                 if (c->bit_offset > c->bm_bits)
2217                         c->bit_offset = c->bm_bits;
2218         }
2219         if (ok) {
2220                 if (len == 0) {
2221                         INFO_bm_xfer_stats(mdev, "send", c);
2222                         return 0;
2223                 } else
2224                         return 1;
2225         }
2226         return -EIO;
2227 }
2228
2229 /* See the comment at receive_bitmap() */
2230 int _drbd_send_bitmap(struct drbd_conf *mdev)
2231 {
2232         struct bm_xfer_ctx c;
2233         struct p_header80 *p;
2234         int err;
2235
2236         ERR_IF(!mdev->bitmap) return false;
2237
2238         /* maybe we should use some per thread scratch page,
2239          * and allocate that during initial device creation? */
2240         p = (struct p_header80 *) __get_free_page(GFP_NOIO);
2241         if (!p) {
2242                 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
2243                 return false;
2244         }
2245
2246         if (get_ldev(mdev)) {
2247                 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2248                         dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2249                         drbd_bm_set_all(mdev);
2250                         if (drbd_bm_write(mdev)) {
2251                                 /* write_bm did fail! Leave full sync flag set in meta data,
2252                                  * but otherwise process as per normal - need to tell other
2253                                  * side that a full resync is required! */
2254                                 dev_err(DEV, "Failed to write bitmap to disk!\n");
2255                         } else {
2256                                 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2257                                 drbd_md_sync(mdev);
2258                         }
2259                 }
2260                 put_ldev(mdev);
2261         }
2262
2263         c = (struct bm_xfer_ctx) {
2264                 .bm_bits = drbd_bm_bits(mdev),
2265                 .bm_words = drbd_bm_words(mdev),
2266         };
2267
2268         do {
2269                 err = send_bitmap_rle_or_plain(mdev, p, &c);
2270         } while (err > 0);
2271
2272         free_page((unsigned long) p);
2273         return err == 0;
2274 }
2275
2276 int drbd_send_bitmap(struct drbd_conf *mdev)
2277 {
2278         int err;
2279
2280         if (!drbd_get_data_sock(mdev))
2281                 return -1;
2282         err = !_drbd_send_bitmap(mdev);
2283         drbd_put_data_sock(mdev);
2284         return err;
2285 }
2286
2287 int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2288 {
2289         int ok;
2290         struct p_barrier_ack p;
2291
2292         p.barrier  = barrier_nr;
2293         p.set_size = cpu_to_be32(set_size);
2294
2295         if (mdev->state.conn < C_CONNECTED)
2296                 return false;
2297         ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
2298                         (struct p_header80 *)&p, sizeof(p));
2299         return ok;
2300 }
2301
2302 /**
2303  * _drbd_send_ack() - Sends an ack packet
2304  * @mdev:       DRBD device.
2305  * @cmd:        Packet command code.
2306  * @sector:     sector, needs to be in big endian byte order
2307  * @blksize:    size in byte, needs to be in big endian byte order
2308  * @block_id:   Id, big endian byte order
2309  */
2310 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2311                           u64 sector,
2312                           u32 blksize,
2313                           u64 block_id)
2314 {
2315         int ok;
2316         struct p_block_ack p;
2317
2318         p.sector   = sector;
2319         p.block_id = block_id;
2320         p.blksize  = blksize;
2321         p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2322
2323         if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
2324                 return false;
2325         ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
2326                                 (struct p_header80 *)&p, sizeof(p));
2327         return ok;
2328 }
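/*
 * Note on byte order, informal: _drbd_send_ack() deliberately takes
 * sector/blksize/block_id already in big endian, so callers that merely
 * echo peer supplied values (drbd_send_ack_dp(), drbd_send_ack_rp()
 * below) can pass them through untouched, while callers starting from
 * CPU-order values convert explicitly, e.g.:
 *
 *	_drbd_send_ack(mdev, cmd, cpu_to_be64(sector),
 *		       cpu_to_be32(blksize), cpu_to_be64(block_id));
 */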
2329
2330 /* dp->sector and dp->block_id already/still in network byte order,
2331  * data_size is payload size according to dp->head,
2332  * and may need to be corrected for digest size. */
2333 int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2334                      struct p_data *dp, int data_size)
2335 {
2336         data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2337                 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
2338         return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2339                               dp->block_id);
2340 }
2341
2342 int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2343                      struct p_block_req *rp)
2344 {
2345         return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2346 }
2347
2348 /**
2349  * drbd_send_ack() - Sends an ack packet
2350  * @mdev:       DRBD device.
2351  * @cmd:        Packet command code.
2352  * @e:          Epoch entry.
2353  */
2354 int drbd_send_ack(struct drbd_conf *mdev,
2355         enum drbd_packets cmd, struct drbd_epoch_entry *e)
2356 {
2357         return _drbd_send_ack(mdev, cmd,
2358                               cpu_to_be64(e->sector),
2359                               cpu_to_be32(e->size),
2360                               e->block_id);
2361 }
2362
2363 /* This function misuses the block_id field to signal if the blocks
2364  * are in sync or not. */
2365 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2366                      sector_t sector, int blksize, u64 block_id)
2367 {
2368         return _drbd_send_ack(mdev, cmd,
2369                               cpu_to_be64(sector),
2370                               cpu_to_be32(blksize),
2371                               cpu_to_be64(block_id));
2372 }
2373
2374 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2375                        sector_t sector, int size, u64 block_id)
2376 {
2377         int ok;
2378         struct p_block_req p;
2379
2380         p.sector   = cpu_to_be64(sector);
2381         p.block_id = block_id;
2382         p.blksize  = cpu_to_be32(size);
2383
2384         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
2385                                 (struct p_header80 *)&p, sizeof(p));
2386         return ok;
2387 }
2388
2389 int drbd_send_drequest_csum(struct drbd_conf *mdev,
2390                             sector_t sector, int size,
2391                             void *digest, int digest_size,
2392                             enum drbd_packets cmd)
2393 {
2394         int ok;
2395         struct p_block_req p;
2396
2397         p.sector   = cpu_to_be64(sector);
2398         p.block_id = BE_DRBD_MAGIC + 0xbeef;
2399         p.blksize  = cpu_to_be32(size);
2400
2401         p.head.magic   = BE_DRBD_MAGIC;
2402         p.head.command = cpu_to_be16(cmd);
2403         p.head.length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
2404
2405         mutex_lock(&mdev->data.mutex);
2406
2407         ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2408         ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2409
2410         mutex_unlock(&mdev->data.mutex);
2411
2412         return ok;
2413 }
2414
2415 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2416 {
2417         int ok;
2418         struct p_block_req p;
2419
2420         p.sector   = cpu_to_be64(sector);
2421         p.block_id = BE_DRBD_MAGIC + 0xbabe;
2422         p.blksize  = cpu_to_be32(size);
2423
2424         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
2425                            (struct p_header80 *)&p, sizeof(p));
2426         return ok;
2427 }
2428
2429 /* called on sndtimeo
2430  * returns false if we should retry,
2431  * true if we think connection is dead
2432  */
2433 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2434 {
2435         int drop_it;
2436         /* long elapsed = (long)(jiffies - mdev->last_received); */
2437
2438         drop_it =   mdev->meta.socket == sock
2439                 || !mdev->asender.task
2440                 || get_t_state(&mdev->asender) != Running
2441                 || mdev->state.conn < C_CONNECTED;
2442
2443         if (drop_it)
2444                 return true;
2445
2446         drop_it = !--mdev->ko_count;
2447         if (!drop_it) {
2448                 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2449                        current->comm, current->pid, mdev->ko_count);
2450                 request_ping(mdev);
2451         }
2452
2453         return drop_it; /* && (mdev->state == R_PRIMARY) */
2454 }
2455
2456 /* The idea of sendpage seems to be to put some kind of reference
2457  * to the page into the skb, and to hand it over to the NIC. In
2458  * this process get_page() gets called.
2459  *
2460  * As soon as the page was really sent over the network put_page()
2461  * gets called by some part of the network layer. [ NIC driver? ]
2462  *
2463  * [ get_page() / put_page() increment/decrement the count. If count
2464  *   reaches 0 the page will be freed. ]
2465  *
2466  * This works nicely with pages from FSs.
2467  * But this means that in protocol A we might signal IO completion too early!
2468  *
2469  * In order not to corrupt data during a resync we must make sure
2470  * that we do not reuse our own buffer pages (EEs) too early, therefore
2471  * we have the net_ee list.
2472  *
2473  * XFS seems to have problems, still, it submits pages with page_count == 0!
2474  * As a workaround, we disable sendpage on pages
2475  * with page_count == 0 or PageSlab.
2476  */
2477 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
2478                    int offset, size_t size, unsigned msg_flags)
2479 {
2480         int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
2481         kunmap(page);
2482         if (sent == size)
2483                 mdev->send_cnt += size>>9;
2484         return sent == size;
2485 }
2486
2487 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
2488                     int offset, size_t size, unsigned msg_flags)
2489 {
2490         mm_segment_t oldfs = get_fs();
2491         int sent, ok;
2492         int len = size;
2493
2494         /* e.g. XFS meta- & log-data is in slab pages, which have a
2495          * page_count of 0 and/or have PageSlab() set.
2496          * we cannot use send_page for those, as that does get_page();
2497          * put_page(); and would cause either a VM_BUG directly, or
2498          * __page_cache_release a page that would actually still be referenced
2499          * by someone, leading to some obscure delayed Oops somewhere else. */
2500         if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
2501                 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
2502
2503         msg_flags |= MSG_NOSIGNAL;
2504         drbd_update_congested(mdev);
2505         set_fs(KERNEL_DS);
2506         do {
2507                 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2508                                                         offset, len,
2509                                                         msg_flags);
2510                 if (sent == -EAGAIN) {
2511                         if (we_should_drop_the_connection(mdev,
2512                                                           mdev->data.socket))
2513                                 break;
2514                         else
2515                                 continue;
2516                 }
2517                 if (sent <= 0) {
2518                         dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2519                              __func__, (int)size, len, sent);
2520                         break;
2521                 }
2522                 len    -= sent;
2523                 offset += sent;
2524         } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2525         set_fs(oldfs);
2526         clear_bit(NET_CONGESTED, &mdev->flags);
2527
2528         ok = (len == 0);
2529         if (likely(ok))
2530                 mdev->send_cnt += size>>9;
2531         return ok;
2532 }
2533
2534 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2535 {
2536         struct bio_vec *bvec;
2537         int i;
2538         /* hint all but last page with MSG_MORE */
2539         __bio_for_each_segment(bvec, bio, i, 0) {
2540                 if (!_drbd_no_send_page(mdev, bvec->bv_page,
2541                                      bvec->bv_offset, bvec->bv_len,
2542                                      i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2543                         return 0;
2544         }
2545         return 1;
2546 }
2547
2548 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2549 {
2550         struct bio_vec *bvec;
2551         int i;
2552         /* hint all but last page with MSG_MORE */
2553         __bio_for_each_segment(bvec, bio, i, 0) {
2554                 if (!_drbd_send_page(mdev, bvec->bv_page,
2555                                      bvec->bv_offset, bvec->bv_len,
2556                                      i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2557                         return 0;
2558         }
2559         return 1;
2560 }
2561
2562 static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2563 {
2564         struct page *page = e->pages;
2565         unsigned len = e->size;
2566         /* hint all but last page with MSG_MORE */
2567         page_chain_for_each(page) {
2568                 unsigned l = min_t(unsigned, len, PAGE_SIZE);
2569                 if (!_drbd_send_page(mdev, page, 0, l,
2570                                 page_chain_next(page) ? MSG_MORE : 0))
2571                         return 0;
2572                 len -= l;
2573         }
2574         return 1;
2575 }
2576
2577 static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2578 {
2579         if (mdev->agreed_pro_version >= 95)
2580                 return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
2581                         (bi_rw & REQ_FUA ? DP_FUA : 0) |
2582                         (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2583                         (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2584         else
2585                 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
2586 }
2587
2588 /* Used to send write requests
2589  * R_PRIMARY -> Peer    (P_DATA)
2590  */
2591 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2592 {
2593         int ok = 1;
2594         struct p_data p;
2595         unsigned int dp_flags = 0;
2596         void *dgb;
2597         int dgs;
2598
2599         if (!drbd_get_data_sock(mdev))
2600                 return 0;
2601
2602         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2603                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2604
2605         if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
2606                 p.head.h80.magic   = BE_DRBD_MAGIC;
2607                 p.head.h80.command = cpu_to_be16(P_DATA);
2608                 p.head.h80.length  =
2609                         cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2610         } else {
2611                 p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
2612                 p.head.h95.command = cpu_to_be16(P_DATA);
2613                 p.head.h95.length  =
2614                         cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2615         }
2616
2617         p.sector   = cpu_to_be64(req->sector);
2618         p.block_id = (unsigned long)req;
2619         p.seq_num  = cpu_to_be32(req->seq_num =
2620                                  atomic_add_return(1, &mdev->packet_seq));
2621
2622         dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2623
2624         if (mdev->state.conn >= C_SYNC_SOURCE &&
2625             mdev->state.conn <= C_PAUSED_SYNC_T)
2626                 dp_flags |= DP_MAY_SET_IN_SYNC;
2627
2628         p.dp_flags = cpu_to_be32(dp_flags);
2629         set_bit(UNPLUG_REMOTE, &mdev->flags);
2630         ok = (sizeof(p) ==
2631                 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
2632         if (ok && dgs) {
2633                 dgb = mdev->int_dig_out;
2634                 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
2635                 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2636         }
2637         if (ok) {
2638                 /* For protocol A, we have to memcpy the payload into
2639                  * socket buffers, as we may complete right away
2640                  * as soon as we handed it over to tcp, at which point the data
2641                  * pages may become invalid.
2642                  *
2643                  * With data integrity enabled, we copy it as well, so we can be
2644                  * sure that even if the bio pages may still be modified, it
2645                  * won't change the data on the wire, thus if the digest checks
2646                  * out ok after sending on this side, but does not fit on the
2647                  * receiving side, we sure have detected corruption elsewhere.
2648                  */
2649                 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
2650                         ok = _drbd_send_bio(mdev, req->master_bio);
2651                 else
2652                         ok = _drbd_send_zc_bio(mdev, req->master_bio);
2653
2654                 /* double check digest, sometimes buffers have been modified in flight. */
2655                 if (dgs > 0 && dgs <= 64) {
2656                          * 64 byte, 512 bit, is the largest digest size
2657                          * currently supported in kernel crypto. */
2658                         unsigned char digest[64];
2659                         drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2660                         if (memcmp(mdev->int_dig_out, digest, dgs)) {
2661                                 dev_warn(DEV,
2662                                         "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
2663                                         (unsigned long long)req->sector, req->size);
2664                         }
2665                 } /* else if (dgs > 64) {
2666                      ... Be noisy about digest too large ...
2667                 } */
2668         }
2669
2670         drbd_put_data_sock(mdev);
2671
2672         return ok;
2673 }
2674
2675 /* answer packet, used to send data back for read requests:
2676  *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
2677  *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
2678  */
2679 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2680                     struct drbd_epoch_entry *e)
2681 {
2682         int ok;
2683         struct p_data p;
2684         void *dgb;
2685         int dgs;
2686
2687         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2688                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2689
2690         if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
2691                 p.head.h80.magic   = BE_DRBD_MAGIC;
2692                 p.head.h80.command = cpu_to_be16(cmd);
2693                 p.head.h80.length  =
2694                         cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2695         } else {
2696                 p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
2697                 p.head.h95.command = cpu_to_be16(cmd);
2698                 p.head.h95.length  =
2699                         cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2700         }
2701
2702         p.sector   = cpu_to_be64(e->sector);
2703         p.block_id = e->block_id;
2704         /* p.seq_num  = 0;    No sequence numbers here.. */
2705
2706         /* Only called by our kernel thread.
2707          * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2708          * in response to admin command or module unload.
2709          */
2710         if (!drbd_get_data_sock(mdev))
2711                 return 0;
2712
2713         ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
2714         if (ok && dgs) {
2715                 dgb = mdev->int_dig_out;
2716                 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
2717                 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2718         }
2719         if (ok)
2720                 ok = _drbd_send_zc_ee(mdev, e);
2721
2722         drbd_put_data_sock(mdev);
2723
2724         return ok;
2725 }
2726
2727 int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2728 {
2729         struct p_block_desc p;
2730
2731         p.sector  = cpu_to_be64(req->sector);
2732         p.blksize = cpu_to_be32(req->size);
2733
2734         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2735 }
2736
2737 /*
2738   drbd_send distinguishes two cases:
2739
2740   Packets sent via the data socket "sock"
2741   and packets sent via the meta data socket "msock"
2742
2743                     sock                      msock
2744   -----------------+-------------------------+------------------------------
2745   timeout           conf.timeout / 2          conf.timeout / 2
2746   timeout action    send a ping via msock     Abort communication
2747                                               and close all sockets
2748 */
2749
2750 /*
2751  * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2752  */
2753 int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2754               void *buf, size_t size, unsigned msg_flags)
2755 {
2756         struct kvec iov;
2757         struct msghdr msg;
2758         int rv, sent = 0;
2759
2760         if (!sock)
2761                 return -1000;
2762
2763         /* THINK  if (signal_pending) return ... ? */
2764
2765         iov.iov_base = buf;
2766         iov.iov_len  = size;
2767
2768         msg.msg_name       = NULL;
2769         msg.msg_namelen    = 0;
2770         msg.msg_control    = NULL;
2771         msg.msg_controllen = 0;
2772         msg.msg_flags      = msg_flags | MSG_NOSIGNAL;
2773
2774         if (sock == mdev->data.socket) {
2775                 mdev->ko_count = mdev->net_conf->ko_count;
2776                 drbd_update_congested(mdev);
2777         }
2778         do {
2779                 /* STRANGE
2780                  * tcp_sendmsg does _not_ use its size parameter at all ?
2781                  *
2782                  * -EAGAIN on timeout, -EINTR on signal.
2783                  */
2784 /* THINK
2785  * do we need to block DRBD_SIG if sock == &meta.socket ??
2786  * otherwise wake_asender() might interrupt some send_*Ack !
2787  */
2788                 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2789                 if (rv == -EAGAIN) {
2790                         if (we_should_drop_the_connection(mdev, sock))
2791                                 break;
2792                         else
2793                                 continue;
2794                 }
2795                 D_ASSERT(rv != 0);
2796                 if (rv == -EINTR) {
2797                         flush_signals(current);
2798                         rv = 0;
2799                 }
2800                 if (rv < 0)
2801                         break;
2802                 sent += rv;
2803                 iov.iov_base += rv;
2804                 iov.iov_len  -= rv;
2805         } while (sent < size);
2806
2807         if (sock == mdev->data.socket)
2808                 clear_bit(NET_CONGESTED, &mdev->flags);
2809
2810         if (rv <= 0) {
2811                 if (rv != -EAGAIN) {
2812                         dev_err(DEV, "%s_sendmsg returned %d\n",
2813                             sock == mdev->meta.socket ? "msock" : "sock",
2814                             rv);
2815                         drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2816                 } else
2817                         drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2818         }
2819
2820         return sent;
2821 }
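
/*
 * Illustrative sketch (editor's addition, not original DRBD code): a typical
 * caller pushes a packet over the data socket by taking it with
 * drbd_get_data_sock() (which acquires data.mutex), sending a fully prepared
 * packet with drbd_send(), and releasing the socket again.  "p" stands for
 * any on-wire packet structure:
 *
 *	if (!drbd_get_data_sock(mdev))
 *		return 0;
 *	ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0);
 *	drbd_put_data_sock(mdev);
 *	return ok;
 */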
2822
2823 static int drbd_open(struct block_device *bdev, fmode_t mode)
2824 {
2825         struct drbd_conf *mdev = bdev->bd_disk->private_data;
2826         unsigned long flags;
2827         int rv = 0;
2828
2829         mutex_lock(&drbd_main_mutex);
2830         spin_lock_irqsave(&mdev->req_lock, flags);
2831         /* to have a stable mdev->state.role
2832          * and no race with updating open_cnt */
2833
2834         if (mdev->state.role != R_PRIMARY) {
2835                 if (mode & FMODE_WRITE)
2836                         rv = -EROFS;
2837                 else if (!allow_oos)
2838                         rv = -EMEDIUMTYPE;
2839         }
2840
2841         if (!rv)
2842                 mdev->open_cnt++;
2843         spin_unlock_irqrestore(&mdev->req_lock, flags);
2844         mutex_unlock(&drbd_main_mutex);
2845
2846         return rv;
2847 }
2848
2849 static int drbd_release(struct gendisk *gd, fmode_t mode)
2850 {
2851         struct drbd_conf *mdev = gd->private_data;
2852         mutex_lock(&drbd_main_mutex);
2853         mdev->open_cnt--;
2854         mutex_unlock(&drbd_main_mutex);
2855         return 0;
2856 }
2857
2858 static void drbd_set_defaults(struct drbd_conf *mdev)
2859 {
2860         /* This way we get a compile error when sync_conf grows,
2861            and we forget to initialize it here */
2862         mdev->sync_conf = (struct syncer_conf) {
2863                 /* .rate = */           DRBD_RATE_DEF,
2864                 /* .after = */          DRBD_AFTER_DEF,
2865                 /* .al_extents = */     DRBD_AL_EXTENTS_DEF,
2866                 /* .verify_alg = */     {}, 0,
2867                 /* .cpu_mask = */       {}, 0,
2868                 /* .csums_alg = */      {}, 0,
2869                 /* .use_rle = */        0,
2870                 /* .on_no_data = */     DRBD_ON_NO_DATA_DEF,
2871                 /* .c_plan_ahead = */   DRBD_C_PLAN_AHEAD_DEF,
2872                 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
2873                 /* .c_fill_target = */  DRBD_C_FILL_TARGET_DEF,
2874                 /* .c_max_rate = */     DRBD_C_MAX_RATE_DEF,
2875                 /* .c_min_rate = */     DRBD_C_MIN_RATE_DEF
2876         };
2877
2878         /* Have to initialize it this way, because the layout differs
2879            between big-endian and little-endian machines */
2880         mdev->state = (union drbd_state) {
2881                 { .role = R_SECONDARY,
2882                   .peer = R_UNKNOWN,
2883                   .conn = C_STANDALONE,
2884                   .disk = D_DISKLESS,
2885                   .pdsk = D_UNKNOWN,
2886                   .susp = 0,
2887                   .susp_nod = 0,
2888                   .susp_fen = 0
2889                 } };
2890 }
2891
2892 void drbd_init_set_defaults(struct drbd_conf *mdev)
2893 {
2894         /* the memset(,0,) did most of this.
2895          * note: only assignments, no allocation in here */
2896
2897         drbd_set_defaults(mdev);
2898
2899         atomic_set(&mdev->ap_bio_cnt, 0);
2900         atomic_set(&mdev->ap_pending_cnt, 0);
2901         atomic_set(&mdev->rs_pending_cnt, 0);
2902         atomic_set(&mdev->unacked_cnt, 0);
2903         atomic_set(&mdev->local_cnt, 0);
2904         atomic_set(&mdev->net_cnt, 0);
2905         atomic_set(&mdev->packet_seq, 0);
2906         atomic_set(&mdev->pp_in_use, 0);
2907         atomic_set(&mdev->pp_in_use_by_net, 0);
2908         atomic_set(&mdev->rs_sect_in, 0);
2909         atomic_set(&mdev->rs_sect_ev, 0);
2910         atomic_set(&mdev->ap_in_flight, 0);
2911
2912         mutex_init(&mdev->md_io_mutex);
2913         mutex_init(&mdev->data.mutex);
2914         mutex_init(&mdev->meta.mutex);
2915         sema_init(&mdev->data.work.s, 0);
2916         sema_init(&mdev->meta.work.s, 0);
2917         mutex_init(&mdev->state_mutex);
2918
2919         spin_lock_init(&mdev->data.work.q_lock);
2920         spin_lock_init(&mdev->meta.work.q_lock);
2921
2922         spin_lock_init(&mdev->al_lock);
2923         spin_lock_init(&mdev->req_lock);
2924         spin_lock_init(&mdev->peer_seq_lock);
2925         spin_lock_init(&mdev->epoch_lock);
2926
2927         INIT_LIST_HEAD(&mdev->active_ee);
2928         INIT_LIST_HEAD(&mdev->sync_ee);
2929         INIT_LIST_HEAD(&mdev->done_ee);
2930         INIT_LIST_HEAD(&mdev->read_ee);
2931         INIT_LIST_HEAD(&mdev->net_ee);
2932         INIT_LIST_HEAD(&mdev->resync_reads);
2933         INIT_LIST_HEAD(&mdev->data.work.q);
2934         INIT_LIST_HEAD(&mdev->meta.work.q);
2935         INIT_LIST_HEAD(&mdev->resync_work.list);
2936         INIT_LIST_HEAD(&mdev->unplug_work.list);
2937         INIT_LIST_HEAD(&mdev->go_diskless.list);
2938         INIT_LIST_HEAD(&mdev->md_sync_work.list);
2939         INIT_LIST_HEAD(&mdev->start_resync_work.list);
2940         INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
2941
2942         mdev->resync_work.cb  = w_resync_inactive;
2943         mdev->unplug_work.cb  = w_send_write_hint;
2944         mdev->go_diskless.cb  = w_go_diskless;
2945         mdev->md_sync_work.cb = w_md_sync;
2946         mdev->bm_io_work.w.cb = w_bitmap_io;
2947         init_timer(&mdev->resync_timer);
2948         init_timer(&mdev->md_sync_timer);
2949         mdev->resync_timer.function = resync_timer_fn;
2950         mdev->resync_timer.data = (unsigned long) mdev;
2951         mdev->md_sync_timer.function = md_sync_timer_fn;
2952         mdev->md_sync_timer.data = (unsigned long) mdev;
2953
2954         init_waitqueue_head(&mdev->misc_wait);
2955         init_waitqueue_head(&mdev->state_wait);
2956         init_waitqueue_head(&mdev->net_cnt_wait);
2957         init_waitqueue_head(&mdev->ee_wait);
2958         init_waitqueue_head(&mdev->al_wait);
2959         init_waitqueue_head(&mdev->seq_wait);
2960
2961         drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
2962         drbd_thread_init(mdev, &mdev->worker, drbd_worker);
2963         drbd_thread_init(mdev, &mdev->asender, drbd_asender);
2964
2965         mdev->agreed_pro_version = PRO_VERSION_MAX;
2966         mdev->write_ordering = WO_bdev_flush;
2967         mdev->resync_wenr = LC_FREE;
2968 }
2969
2970 void drbd_mdev_cleanup(struct drbd_conf *mdev)
2971 {
2972         int i;
2973         if (mdev->receiver.t_state != None)
2974                 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2975                                 mdev->receiver.t_state);
2976
2977         /* no need to lock it, I'm the only thread alive */
2978         if (atomic_read(&mdev->current_epoch->epoch_size) !=  0)
2979                 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
2980         mdev->al_writ_cnt  =
2981         mdev->bm_writ_cnt  =
2982         mdev->read_cnt     =
2983         mdev->recv_cnt     =
2984         mdev->send_cnt     =
2985         mdev->writ_cnt     =
2986         mdev->p_size       =
2987         mdev->rs_start     =
2988         mdev->rs_total     =
2989         mdev->rs_failed    = 0;
2990         mdev->rs_last_events = 0;
2991         mdev->rs_last_sect_ev = 0;
2992         for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2993                 mdev->rs_mark_left[i] = 0;
2994                 mdev->rs_mark_time[i] = 0;
2995         }
2996         D_ASSERT(mdev->net_conf == NULL);
2997
2998         drbd_set_my_capacity(mdev, 0);
2999         if (mdev->bitmap) {
3000                 /* maybe never allocated. */
3001                 drbd_bm_resize(mdev, 0, 1);
3002                 drbd_bm_cleanup(mdev);
3003         }
3004
3005         drbd_free_resources(mdev);
3006         clear_bit(AL_SUSPENDED, &mdev->flags);
3007
3008         /*
3009          * currently we call drbd_init_ee only on module load, so
3010          * we may call drbd_release_ee only on module unload!
3011          */
3012         D_ASSERT(list_empty(&mdev->active_ee));
3013         D_ASSERT(list_empty(&mdev->sync_ee));
3014         D_ASSERT(list_empty(&mdev->done_ee));
3015         D_ASSERT(list_empty(&mdev->read_ee));
3016         D_ASSERT(list_empty(&mdev->net_ee));
3017         D_ASSERT(list_empty(&mdev->resync_reads));
3018         D_ASSERT(list_empty(&mdev->data.work.q));
3019         D_ASSERT(list_empty(&mdev->meta.work.q));
3020         D_ASSERT(list_empty(&mdev->resync_work.list));
3021         D_ASSERT(list_empty(&mdev->unplug_work.list));
3022         D_ASSERT(list_empty(&mdev->go_diskless.list));
3023 }
3024
3025
3026 static void drbd_destroy_mempools(void)
3027 {
3028         struct page *page;
3029
3030         while (drbd_pp_pool) {
3031                 page = drbd_pp_pool;
3032                 drbd_pp_pool = (struct page *)page_private(page);
3033                 __free_page(page);
3034                 drbd_pp_vacant--;
3035         }
3036
3037         /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3038
3039         if (drbd_ee_mempool)
3040                 mempool_destroy(drbd_ee_mempool);
3041         if (drbd_request_mempool)
3042                 mempool_destroy(drbd_request_mempool);
3043         if (drbd_ee_cache)
3044                 kmem_cache_destroy(drbd_ee_cache);
3045         if (drbd_request_cache)
3046                 kmem_cache_destroy(drbd_request_cache);
3047         if (drbd_bm_ext_cache)
3048                 kmem_cache_destroy(drbd_bm_ext_cache);
3049         if (drbd_al_ext_cache)
3050                 kmem_cache_destroy(drbd_al_ext_cache);
3051
3052         drbd_ee_mempool      = NULL;
3053         drbd_request_mempool = NULL;
3054         drbd_ee_cache        = NULL;
3055         drbd_request_cache   = NULL;
3056         drbd_bm_ext_cache    = NULL;
3057         drbd_al_ext_cache    = NULL;
3058
3059         return;
3060 }
3061
3062 static int drbd_create_mempools(void)
3063 {
3064         struct page *page;
3065         const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
3066         int i;
3067
3068         /* prepare our caches and mempools */
3069         drbd_request_mempool = NULL;
3070         drbd_ee_cache        = NULL;
3071         drbd_request_cache   = NULL;
3072         drbd_bm_ext_cache    = NULL;
3073         drbd_al_ext_cache    = NULL;
3074         drbd_pp_pool         = NULL;
3075
3076         /* caches */
3077         drbd_request_cache = kmem_cache_create(
3078                 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3079         if (drbd_request_cache == NULL)
3080                 goto Enomem;
3081
3082         drbd_ee_cache = kmem_cache_create(
3083                 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3084         if (drbd_ee_cache == NULL)
3085                 goto Enomem;
3086
3087         drbd_bm_ext_cache = kmem_cache_create(
3088                 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3089         if (drbd_bm_ext_cache == NULL)
3090                 goto Enomem;
3091
3092         drbd_al_ext_cache = kmem_cache_create(
3093                 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3094         if (drbd_al_ext_cache == NULL)
3095                 goto Enomem;
3096
3097         /* mempools */
3098         drbd_request_mempool = mempool_create(number,
3099                 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3100         if (drbd_request_mempool == NULL)
3101                 goto Enomem;
3102
3103         drbd_ee_mempool = mempool_create(number,
3104                 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
3105         if (drbd_ee_mempool == NULL)
3106                 goto Enomem;
3107
3108         /* drbd's page pool */
3109         spin_lock_init(&drbd_pp_lock);
3110
3111         for (i = 0; i < number; i++) {
3112                 page = alloc_page(GFP_HIGHUSER);
3113                 if (!page)
3114                         goto Enomem;
3115                 set_page_private(page, (unsigned long)drbd_pp_pool);
3116                 drbd_pp_pool = page;
3117         }
3118         drbd_pp_vacant = number;
3119
3120         return 0;
3121
3122 Enomem:
3123         drbd_destroy_mempools(); /* in case we allocated some */
3124         return -ENOMEM;
3125 }
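
/*
 * Illustrative sketch (editor's addition): the preallocated pages above form
 * a singly linked list threaded through page_private().  Popping one page off
 * drbd_pp_pool mirrors the loop in drbd_destroy_mempools(); a simplified
 * consumer, holding drbd_pp_lock, might look like this:
 *
 *	spin_lock(&drbd_pp_lock);
 *	page = drbd_pp_pool;
 *	if (page) {
 *		drbd_pp_pool = (struct page *)page_private(page);
 *		set_page_private(page, 0);
 *		drbd_pp_vacant--;
 *	}
 *	spin_unlock(&drbd_pp_lock);
 */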
3126
3127 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3128         void *unused)
3129 {
3130         /* just so we have it.  you never know what interesting things we
3131          * might want to do here some day...
3132          */
3133
3134         return NOTIFY_DONE;
3135 }
3136
3137 static struct notifier_block drbd_notifier = {
3138         .notifier_call = drbd_notify_sys,
3139 };
3140
3141 static void drbd_release_ee_lists(struct drbd_conf *mdev)
3142 {
3143         int rr;
3144
3145         rr = drbd_release_ee(mdev, &mdev->active_ee);
3146         if (rr)
3147                 dev_err(DEV, "%d EEs in active list found!\n", rr);
3148
3149         rr = drbd_release_ee(mdev, &mdev->sync_ee);
3150         if (rr)
3151                 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3152
3153         rr = drbd_release_ee(mdev, &mdev->read_ee);
3154         if (rr)
3155                 dev_err(DEV, "%d EEs in read list found!\n", rr);
3156
3157         rr = drbd_release_ee(mdev, &mdev->done_ee);
3158         if (rr)
3159                 dev_err(DEV, "%d EEs in done list found!\n", rr);
3160
3161         rr = drbd_release_ee(mdev, &mdev->net_ee);
3162         if (rr)
3163                 dev_err(DEV, "%d EEs in net list found!\n", rr);
3164 }
3165
3166 /* caution. no locking.
3167  * currently only used from module cleanup code. */
3168 static void drbd_delete_device(unsigned int minor)
3169 {
3170         struct drbd_conf *mdev = minor_to_mdev(minor);
3171
3172         if (!mdev)
3173                 return;
3174
3175         /* paranoia asserts */
3176         if (mdev->open_cnt != 0)
3177                 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3178                                 __FILE__ , __LINE__);
3179
3180         ERR_IF (!list_empty(&mdev->data.work.q)) {
3181                 struct list_head *lp;
3182                 list_for_each(lp, &mdev->data.work.q) {
3183                         dev_err(DEV, "lp = %p\n", lp);
3184                 }
3185         };
3186         /* end paranoia asserts */
3187
3188         del_gendisk(mdev->vdisk);
3189
3190         /* cleanup stuff that may have been allocated during
3191          * device (re-)configuration or state changes */
3192
3193         if (mdev->this_bdev)
3194                 bdput(mdev->this_bdev);
3195
3196         drbd_free_resources(mdev);
3197
3198         drbd_release_ee_lists(mdev);
3199
3200         /* should be free'd on disconnect? */
3201         kfree(mdev->ee_hash);
3202         /*
3203         mdev->ee_hash_s = 0;
3204         mdev->ee_hash = NULL;
3205         */
3206
3207         lc_destroy(mdev->act_log);
3208         lc_destroy(mdev->resync);
3209
3210         kfree(mdev->p_uuid);
3211         /* mdev->p_uuid = NULL; */
3212
3213         kfree(mdev->int_dig_out);
3214         kfree(mdev->int_dig_in);
3215         kfree(mdev->int_dig_vv);
3216
3217         /* cleanup the rest that has been
3218          * allocated from drbd_new_device
3219          * and actually free the mdev itself */
3220         drbd_free_mdev(mdev);
3221 }
3222
3223 static void drbd_cleanup(void)
3224 {
3225         unsigned int i;
3226
3227         unregister_reboot_notifier(&drbd_notifier);
3228
3229         /* first remove proc,
3230          * drbdsetup uses its presence to detect
3231          * whether DRBD is loaded.
3232          * If we got stuck in proc removal
3233          * while netlink was already deregistered,
3234          * some drbdsetup commands could wait forever
3235          * for an answer.
3236          */
3237         if (drbd_proc)
3238                 remove_proc_entry("drbd", NULL);
3239
3240         drbd_nl_cleanup();
3241
3242         if (minor_table) {
3243                 i = minor_count;
3244                 while (i--)
3245                         drbd_delete_device(i);
3246                 drbd_destroy_mempools();
3247         }
3248
3249         kfree(minor_table);
3250
3251         unregister_blkdev(DRBD_MAJOR, "drbd");
3252
3253         printk(KERN_INFO "drbd: module cleanup done.\n");
3254 }
3255
3256 /**
3257  * drbd_congested() - Callback for pdflush
3258  * @congested_data:     User data
3259  * @bdi_bits:           Bits pdflush is currently interested in
3260  *
3261  * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3262  */
3263 static int drbd_congested(void *congested_data, int bdi_bits)
3264 {
3265         struct drbd_conf *mdev = congested_data;
3266         struct request_queue *q;
3267         char reason = '-';
3268         int r = 0;
3269
3270         if (!may_inc_ap_bio(mdev)) {
3271                 /* DRBD has frozen IO */
3272                 r = bdi_bits;
3273                 reason = 'd';
3274                 goto out;
3275         }
3276
3277         if (get_ldev(mdev)) {
3278                 q = bdev_get_queue(mdev->ldev->backing_bdev);
3279                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3280                 put_ldev(mdev);
3281                 if (r)
3282                         reason = 'b';
3283         }
3284
3285         if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3286                 r |= (1 << BDI_async_congested);
3287                 reason = reason == 'b' ? 'a' : 'n';
3288         }
3289
3290 out:
3291         mdev->congestion_reason = reason;
3292         return r;
3293 }
3294
3295 struct drbd_conf *drbd_new_device(unsigned int minor)
3296 {
3297         struct drbd_conf *mdev;
3298         struct gendisk *disk;
3299         struct request_queue *q;
3300
3301         /* GFP_KERNEL, we are outside of all write-out paths */
3302         mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3303         if (!mdev)
3304                 return NULL;
3305         if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3306                 goto out_no_cpumask;
3307
3308         mdev->minor = minor;
3309
3310         drbd_init_set_defaults(mdev);
3311
3312         q = blk_alloc_queue(GFP_KERNEL);
3313         if (!q)
3314                 goto out_no_q;
3315         mdev->rq_queue = q;
3316         q->queuedata   = mdev;
3317
3318         disk = alloc_disk(1);
3319         if (!disk)
3320                 goto out_no_disk;
3321         mdev->vdisk = disk;
3322
3323         set_disk_ro(disk, true);
3324
3325         disk->queue = q;
3326         disk->major = DRBD_MAJOR;
3327         disk->first_minor = minor;
3328         disk->fops = &drbd_ops;
3329         sprintf(disk->disk_name, "drbd%d", minor);
3330         disk->private_data = mdev;
3331
3332         mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3333         /* we have no partitions. we contain only ourselves. */
3334         mdev->this_bdev->bd_contains = mdev->this_bdev;
3335
3336         q->backing_dev_info.congested_fn = drbd_congested;
3337         q->backing_dev_info.congested_data = mdev;
3338
3339         blk_queue_make_request(q, drbd_make_request);
3340         blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9);
3341         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3342         blk_queue_merge_bvec(q, drbd_merge_bvec);
3343         q->queue_lock = &mdev->req_lock;
3344
3345         mdev->md_io_page = alloc_page(GFP_KERNEL);
3346         if (!mdev->md_io_page)
3347                 goto out_no_io_page;
3348
3349         if (drbd_bm_init(mdev))
3350                 goto out_no_bitmap;
3351         /* no need to lock access, we are still initializing this minor device. */
3352         if (!tl_init(mdev))
3353                 goto out_no_tl;
3354
3355         mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3356         if (!mdev->app_reads_hash)
3357                 goto out_no_app_reads;
3358
3359         mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3360         if (!mdev->current_epoch)
3361                 goto out_no_epoch;
3362
3363         INIT_LIST_HEAD(&mdev->current_epoch->list);
3364         mdev->epochs = 1;
3365
3366         return mdev;
3367
3368 /* out_whatever_else:
3369         kfree(mdev->current_epoch); */
3370 out_no_epoch:
3371         kfree(mdev->app_reads_hash);
3372 out_no_app_reads:
3373         tl_cleanup(mdev);
3374 out_no_tl:
3375         drbd_bm_cleanup(mdev);
3376 out_no_bitmap:
3377         __free_page(mdev->md_io_page);
3378 out_no_io_page:
3379         put_disk(disk);
3380 out_no_disk:
3381         blk_cleanup_queue(q);
3382 out_no_q:
3383         free_cpumask_var(mdev->cpu_mask);
3384 out_no_cpumask:
3385         kfree(mdev);
3386         return NULL;
3387 }
3388
3389 /* counterpart of drbd_new_device.
3390  * last part of drbd_delete_device. */
3391 void drbd_free_mdev(struct drbd_conf *mdev)
3392 {
3393         kfree(mdev->current_epoch);
3394         kfree(mdev->app_reads_hash);
3395         tl_cleanup(mdev);
3396         if (mdev->bitmap) /* should no longer be there. */
3397                 drbd_bm_cleanup(mdev);
3398         __free_page(mdev->md_io_page);
3399         put_disk(mdev->vdisk);
3400         blk_cleanup_queue(mdev->rq_queue);
3401         free_cpumask_var(mdev->cpu_mask);
3402         drbd_free_tl_hash(mdev);
3403         kfree(mdev);
3404 }
3405
3406
3407 int __init drbd_init(void)
3408 {
3409         int err;
3410
3411         if (sizeof(struct p_handshake) != 80) {
3412                 printk(KERN_ERR
3413                        "drbd: never change the size or layout "
3414                        "of the HandShake packet.\n");
3415                 return -EINVAL;
3416         }
3417
3418         if (1 > minor_count || minor_count > 255) {
3419                 printk(KERN_ERR
3420                         "drbd: invalid minor_count (%d)\n", minor_count);
3421 #ifdef MODULE
3422                 return -EINVAL;
3423 #else
3424                 minor_count = 8;
3425 #endif
3426         }
3427
3428         err = drbd_nl_init();
3429         if (err)
3430                 return err;
3431
3432         err = register_blkdev(DRBD_MAJOR, "drbd");
3433         if (err) {
3434                 printk(KERN_ERR
3435                        "drbd: unable to register block device major %d\n",
3436                        DRBD_MAJOR);
3437                 return err;
3438         }
3439
3440         register_reboot_notifier(&drbd_notifier);
3441
3442         /*
3443          * allocate all necessary structs
3444          */
3445         err = -ENOMEM;
3446
3447         init_waitqueue_head(&drbd_pp_wait);
3448
3449         drbd_proc = NULL; /* play safe for drbd_cleanup */
3450         minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3451                                 GFP_KERNEL);
3452         if (!minor_table)
3453                 goto Enomem;
3454
3455         err = drbd_create_mempools();
3456         if (err)
3457                 goto Enomem;
3458
3459         drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3460         if (!drbd_proc) {
3461                 printk(KERN_ERR "drbd: unable to register proc file\n");
3462                 goto Enomem;
3463         }
3464
3465         rwlock_init(&global_state_lock);
3466
3467         printk(KERN_INFO "drbd: initialized. "
3468                "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3469                API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3470         printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3471         printk(KERN_INFO "drbd: registered as block device major %d\n",
3472                 DRBD_MAJOR);
3473         printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3474
3475         return 0; /* Success! */
3476
3477 Enomem:
3478         drbd_cleanup();
3479         if (err == -ENOMEM)
3480                 /* currently always the case */
3481                 printk(KERN_ERR "drbd: ran out of memory\n");
3482         else
3483                 printk(KERN_ERR "drbd: initialization failure\n");
3484         return err;
3485 }
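
/*
 * Editor's note: minor_count is validated above to the range 1..255; in the
 * non-modular build an invalid value silently falls back to 8.  Assuming the
 * usual module-parameter handling, loading the module might look like:
 *
 *	modprobe drbd minor_count=16
 */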
3486
3487 void drbd_free_bc(struct drbd_backing_dev *ldev)
3488 {
3489         if (ldev == NULL)
3490                 return;
3491
3492         blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3493         blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3494
3495         kfree(ldev);
3496 }
3497
3498 void drbd_free_sock(struct drbd_conf *mdev)
3499 {
3500         if (mdev->data.socket) {
3501                 mutex_lock(&mdev->data.mutex);
3502                 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3503                 sock_release(mdev->data.socket);
3504                 mdev->data.socket = NULL;
3505                 mutex_unlock(&mdev->data.mutex);
3506         }
3507         if (mdev->meta.socket) {
3508                 mutex_lock(&mdev->meta.mutex);
3509                 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3510                 sock_release(mdev->meta.socket);
3511                 mdev->meta.socket = NULL;
3512                 mutex_unlock(&mdev->meta.mutex);
3513         }
3514 }
3515
3516
3517 void drbd_free_resources(struct drbd_conf *mdev)
3518 {
3519         crypto_free_hash(mdev->csums_tfm);
3520         mdev->csums_tfm = NULL;
3521         crypto_free_hash(mdev->verify_tfm);
3522         mdev->verify_tfm = NULL;
3523         crypto_free_hash(mdev->cram_hmac_tfm);
3524         mdev->cram_hmac_tfm = NULL;
3525         crypto_free_hash(mdev->integrity_w_tfm);
3526         mdev->integrity_w_tfm = NULL;
3527         crypto_free_hash(mdev->integrity_r_tfm);
3528         mdev->integrity_r_tfm = NULL;
3529
3530         drbd_free_sock(mdev);
3531
3532         __no_warn(local,
3533                   drbd_free_bc(mdev->ldev);
3534                   mdev->ldev = NULL;);
3535 }
3536
3537 /* meta data management */
3538
3539 struct meta_data_on_disk {
3540         u64 la_size;           /* last agreed size. */
3541         u64 uuid[UI_SIZE];   /* UUIDs. */
3542         u64 device_uuid;
3543         u64 reserved_u64_1;
3544         u32 flags;             /* MDF */
3545         u32 magic;
3546         u32 md_size_sect;
3547         u32 al_offset;         /* offset to this block */
3548         u32 al_nr_extents;     /* important for restoring the AL */
3549               /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3550         u32 bm_offset;         /* offset to the bitmap, from here */
3551         u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
3552         u32 reserved_u32[4];
3553
3554 } __packed;
3555
3556 /**
3557  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3558  * @mdev:       DRBD device.
3559  */
3560 void drbd_md_sync(struct drbd_conf *mdev)
3561 {
3562         struct meta_data_on_disk *buffer;
3563         sector_t sector;
3564         int i;
3565
3566         del_timer(&mdev->md_sync_timer);
3567         /* timer may be rearmed by drbd_md_mark_dirty() now. */
3568         if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3569                 return;
3570
3571         /* We use D_FAILED here, not D_ATTACHING, because we try to write
3572          * metadata even if we detach due to a disk failure! */
3573         if (!get_ldev_if_state(mdev, D_FAILED))
3574                 return;
3575
3576         mutex_lock(&mdev->md_io_mutex);
3577         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3578         memset(buffer, 0, 512);
3579
3580         buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3581         for (i = UI_CURRENT; i < UI_SIZE; i++)
3582                 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3583         buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3584         buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3585
3586         buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
3587         buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
3588         buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3589         buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3590         buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3591
3592         buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3593
3594         D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3595         sector = mdev->ldev->md.md_offset;
3596
3597         if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3598                 /* this was a best-effort attempt anyway ... */
3599                 dev_err(DEV, "meta data update failed!\n");
3600                 drbd_chk_io_error(mdev, 1, true);
3601         }
3602
3603         /* Update mdev->ldev->md.la_size_sect,
3604          * since we just wrote it to the on-disk metadata. */
3605         mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3606
3607         mutex_unlock(&mdev->md_io_mutex);
3608         put_ldev(mdev);
3609 }
3610
3611 /**
3612  * drbd_md_read() - Reads in the meta data super block
3613  * @mdev:       DRBD device.
3614  * @bdev:       Device from which the meta data should be read in.
3615  *
3616  * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
3617  * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3618  */
3619 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3620 {
3621         struct meta_data_on_disk *buffer;
3622         int i, rv = NO_ERROR;
3623
3624         if (!get_ldev_if_state(mdev, D_ATTACHING))
3625                 return ERR_IO_MD_DISK;
3626
3627         mutex_lock(&mdev->md_io_mutex);
3628         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3629
3630         if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
3631                 /* NOTE: can't do normal error processing here as this is
3632                    called BEFORE disk is attached */
3633                 dev_err(DEV, "Error while reading metadata.\n");
3634                 rv = ERR_IO_MD_DISK;
3635                 goto err;
3636         }
3637
3638         if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3639                 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3640                 rv = ERR_MD_INVALID;
3641                 goto err;
3642         }
3643         if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3644                 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3645                     be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3646                 rv = ERR_MD_INVALID;
3647                 goto err;
3648         }
3649         if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3650                 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3651                     be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3652                 rv = ERR_MD_INVALID;
3653                 goto err;
3654         }
3655         if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3656                 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3657                     be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3658                 rv = ERR_MD_INVALID;
3659                 goto err;
3660         }
3661
3662         if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3663                 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3664                     be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3665                 rv = ERR_MD_INVALID;
3666                 goto err;
3667         }
3668
3669         bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3670         for (i = UI_CURRENT; i < UI_SIZE; i++)
3671                 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3672         bdev->md.flags = be32_to_cpu(buffer->flags);
3673         mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3674         bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3675
3676         if (mdev->sync_conf.al_extents < 7)
3677                 mdev->sync_conf.al_extents = 127;
3678
3679  err:
3680         mutex_unlock(&mdev->md_io_mutex);
3681         put_ldev(mdev);
3682
3683         return rv;
3684 }
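
/*
 * Illustrative sketch (editor's addition): an attach path would read the meta
 * data before the disk goes into active use and bail out on anything but
 * NO_ERROR.  "retcode" and "nbc" are placeholder names for the local return
 * variable and the backing device about to be attached:
 *
 *	retcode = drbd_md_read(mdev, nbc);
 *	if (retcode != NO_ERROR)
 *		goto fail;
 */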
3685
3686 static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index)
3687 {
3688         static char *uuid_str[UI_EXTENDED_SIZE] = {
3689                 [UI_CURRENT] = "CURRENT",
3690                 [UI_BITMAP] = "BITMAP",
3691                 [UI_HISTORY_START] = "HISTORY_START",
3692                 [UI_HISTORY_END] = "HISTORY_END",
3693                 [UI_SIZE] = "SIZE",
3694                 [UI_FLAGS] = "FLAGS",
3695         };
3696
3697         if (index >= UI_EXTENDED_SIZE) {
3698                 dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n");
3699                 return;
3700         }
3701
3702         dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n",
3703                  uuid_str[index],
3704                  (unsigned long long)mdev->ldev->md.uuid[index]);
3705 }
3706
3707
3708 /**
3709  * drbd_md_mark_dirty() - Mark meta data super block as dirty
3710  * @mdev:       DRBD device.
3711  *
3712  * Call this function if you change anything that should be written to
3713  * the meta-data super block. This function sets MD_DIRTY, and starts a
3714  * timer that ensures drbd_md_sync() is called within five seconds.
3715  */
3716 #ifdef DEBUG
3717 void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3718 {
3719         if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3720                 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3721                 mdev->last_md_mark_dirty.line = line;
3722                 mdev->last_md_mark_dirty.func = func;
3723         }
3724 }
3725 #else
3726 void drbd_md_mark_dirty(struct drbd_conf *mdev)
3727 {
3728         if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
3729                 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
3730 }
3731 #endif
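
/*
 * Illustrative sketch (editor's addition): the usual contract is to mark the
 * super block dirty right after changing any of its contents (directly or via
 * helpers such as drbd_md_set_flag() below), and either to force the write
 * out with drbd_md_sync() or to let the md_sync_timer trigger it; compare
 * drbd_bmio_set_n_write():
 *
 *	drbd_md_set_flag(mdev, MDF_FULL_SYNC);	// marks the super block dirty
 *	drbd_md_sync(mdev);			// write it out right away ...
 *	// ... or do nothing here and let md_sync_timer / w_md_sync() do it
 *	// within roughly five seconds.
 */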
3732
3733 static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3734 {
3735         int i;
3736
3737         for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
3738                 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3739                 debug_drbd_uuid(mdev, i+1);
3740         }
3741 }
3742
3743 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3744 {
3745         if (idx == UI_CURRENT) {
3746                 if (mdev->state.role == R_PRIMARY)
3747                         val |= 1;
3748                 else
3749                         val &= ~((u64)1);
3750
3751                 drbd_set_ed_uuid(mdev, val);
3752         }
3753
3754         mdev->ldev->md.uuid[idx] = val;
3755         debug_drbd_uuid(mdev, idx);
3756         drbd_md_mark_dirty(mdev);
3757 }
3758
3759
3760 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3761 {
3762         if (mdev->ldev->md.uuid[idx]) {
3763                 drbd_uuid_move_history(mdev);
3764                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3765                 debug_drbd_uuid(mdev, UI_HISTORY_START);
3766         }
3767         _drbd_uuid_set(mdev, idx, val);
3768 }
3769
3770 /**
3771  * drbd_uuid_new_current() - Creates a new current UUID
3772  * @mdev:       DRBD device.
3773  *
3774  * Creates a new current UUID, and rotates the old current UUID into
3775  * the bitmap slot. Causes an incremental resync upon next connect.
3776  */
3777 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3778 {
3779         u64 val;
3780
3781         dev_info(DEV, "Creating new current UUID\n");
3782         D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
3783         mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3784         debug_drbd_uuid(mdev, UI_BITMAP);
3785
3786         get_random_bytes(&val, sizeof(u64));
3787         _drbd_uuid_set(mdev, UI_CURRENT, val);
3788         /* get it to stable storage _now_ */
3789         drbd_md_sync(mdev);
3790 }
3791
3792 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3793 {
3794         if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3795                 return;
3796
3797         if (val == 0) {
3798                 drbd_uuid_move_history(mdev);
3799                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3800                 mdev->ldev->md.uuid[UI_BITMAP] = 0;
3801                 debug_drbd_uuid(mdev, UI_HISTORY_START);
3802                 debug_drbd_uuid(mdev, UI_BITMAP);
3803         } else {
3804                 if (mdev->ldev->md.uuid[UI_BITMAP])
3805                         dev_warn(DEV, "bm UUID already set");
3806
3807                 mdev->ldev->md.uuid[UI_BITMAP] = val;
3808                 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
3809
3810                 debug_drbd_uuid(mdev, UI_BITMAP);
3811         }
3812         drbd_md_mark_dirty(mdev);
3813 }
3814
3815 /**
3816  * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3817  * @mdev:       DRBD device.
3818  *
3819  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3820  */
3821 int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3822 {
3823         int rv = -EIO;
3824
3825         if (get_ldev_if_state(mdev, D_ATTACHING)) {
3826                 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3827                 drbd_md_sync(mdev);
3828                 drbd_bm_set_all(mdev);
3829
3830                 rv = drbd_bm_write(mdev);
3831
3832                 if (!rv) {
3833                         drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3834                         drbd_md_sync(mdev);
3835                 }
3836
3837                 put_ldev(mdev);
3838         }
3839
3840         return rv;
3841 }
3842
3843 /**
3844  * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3845  * @mdev:       DRBD device.
3846  *
3847  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3848  */
3849 int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3850 {
3851         int rv = -EIO;
3852
3853         drbd_resume_al(mdev);
3854         if (get_ldev_if_state(mdev, D_ATTACHING)) {
3855                 drbd_bm_clear_all(mdev);
3856                 rv = drbd_bm_write(mdev);
3857                 put_ldev(mdev);
3858         }
3859
3860         return rv;
3861 }
3862
3863 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3864 {
3865         struct bm_io_work *work = container_of(w, struct bm_io_work, w);
3866         int rv = -EIO;
3867
3868         D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3869
3870         if (get_ldev(mdev)) {
3871                 drbd_bm_lock(mdev, work->why);
3872                 rv = work->io_fn(mdev);
3873                 drbd_bm_unlock(mdev);
3874                 put_ldev(mdev);
3875         }
3876
3877         clear_bit(BITMAP_IO, &mdev->flags);
3878         smp_mb__after_clear_bit();
3879         wake_up(&mdev->misc_wait);
3880
3881         if (work->done)
3882                 work->done(mdev, rv);
3883
3884         clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3885         work->why = NULL;
3886
3887         return 1;
3888 }
3889
3890 void drbd_ldev_destroy(struct drbd_conf *mdev)
3891 {
3892         lc_destroy(mdev->resync);
3893         mdev->resync = NULL;
3894         lc_destroy(mdev->act_log);
3895         mdev->act_log = NULL;
3896         __no_warn(local,
3897                 drbd_free_bc(mdev->ldev);
3898                 mdev->ldev = NULL;);
3899
3900         if (mdev->md_io_tmpp) {
3901                 __free_page(mdev->md_io_tmpp);
3902                 mdev->md_io_tmpp = NULL;
3903         }
3904         clear_bit(GO_DISKLESS, &mdev->flags);
3905 }
3906
3907 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3908 {
3909         D_ASSERT(mdev->state.disk == D_FAILED);
3910         /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3911          * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
3912          * the protected members anymore, though, so once local_cnt reaches
3913          * zero again (via put_ldev), it will be safe to free them. */
3914         drbd_force_state(mdev, NS(disk, D_DISKLESS));
3915         return 1;
3916 }
3917
3918 void drbd_go_diskless(struct drbd_conf *mdev)
3919 {
3920         D_ASSERT(mdev->state.disk == D_FAILED);
3921         if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
3922                 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
3923 }
3924
3925 /**
3926  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3927  * @mdev:       DRBD device.
3928  * @io_fn:      IO callback to be called when bitmap IO is possible
3929  * @done:       callback to be called after the bitmap IO was performed
3930  * @why:        Descriptive text of the reason for doing the IO
3931  *
3932  * While IO on the bitmap happens, application IO is frozen; this ensures
3933  * that drbd_set_out_of_sync() cannot be called.  This function MAY ONLY be
3934  * called from worker context.  It MUST NOT be used while a previous such
3935  * work item is still pending!
3936  */
3937 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3938                           int (*io_fn)(struct drbd_conf *),
3939                           void (*done)(struct drbd_conf *, int),
3940                           char *why)
3941 {
3942         D_ASSERT(current == mdev->worker.task);
3943
3944         D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3945         D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3946         D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3947         if (mdev->bm_io_work.why)
3948                 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
3949                         why, mdev->bm_io_work.why);
3950
3951         mdev->bm_io_work.io_fn = io_fn;
3952         mdev->bm_io_work.done = done;
3953         mdev->bm_io_work.why = why;
3954
3955         spin_lock_irq(&mdev->req_lock);
3956         set_bit(BITMAP_IO, &mdev->flags);
3957         if (atomic_read(&mdev->ap_bio_cnt) == 0) {
3958                 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
3959                         drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
3960         }
3961         spin_unlock_irq(&mdev->req_lock);
3962 }
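
/*
 * Illustrative sketch (editor's addition): from worker context, a full
 * "set all bits and write out" pass could be queued like this; the done
 * callback name is hypothetical:
 *
 *	static void set_n_write_done(struct drbd_conf *mdev, int rv)
 *	{
 *		if (rv)
 *			dev_err(DEV, "writing the bitmap failed (%d)\n", rv);
 *	}
 *
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write,
 *			     set_n_write_done, "set_n_write");
 */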
3963
3964 /**
3965  * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
3966  * @mdev:       DRBD device.
3967  * @io_fn:      IO callback to be called when bitmap IO is possible
3968  * @why:        Descriptive text of the reason for doing the IO
3969  *
3970  * Freezes application IO while the actual IO operation runs.  This
3971  * function MAY NOT be called from worker context.
3972  */
3973 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
3974 {
3975         int rv;
3976
3977         D_ASSERT(current != mdev->worker.task);
3978
3979         drbd_suspend_io(mdev);
3980
3981         drbd_bm_lock(mdev, why);
3982         rv = io_fn(mdev);
3983         drbd_bm_unlock(mdev);
3984
3985         drbd_resume_io(mdev);
3986
3987         return rv;
3988 }
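
/*
 * Illustrative sketch (editor's addition): outside of worker context the
 * synchronous variant above is used instead, e.g. to clear the whole bitmap:
 *
 *	rv = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write");
 *	if (rv)
 *		dev_err(DEV, "bitmap IO failed (%d)\n", rv);
 */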
3989
3990 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3991 {
3992         if ((mdev->ldev->md.flags & flag) != flag) {
3993                 drbd_md_mark_dirty(mdev);
3994                 mdev->ldev->md.flags |= flag;
3995         }
3996 }
3997
3998 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3999 {
4000         if ((mdev->ldev->md.flags & flag) != 0) {
4001                 drbd_md_mark_dirty(mdev);
4002                 mdev->ldev->md.flags &= ~flag;
4003         }
4004 }
4005 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4006 {
4007         return (bdev->md.flags & flag) != 0;
4008 }
4009
4010 static void md_sync_timer_fn(unsigned long data)
4011 {
4012         struct drbd_conf *mdev = (struct drbd_conf *) data;
4013
4014         drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4015 }
4016
4017 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4018 {
4019         dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
4020 #ifdef DEBUG
4021         dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4022                 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4023 #endif
4024         drbd_md_sync(mdev);
4025         return 1;
4026 }
4027
4028 #ifdef CONFIG_DRBD_FAULT_INJECTION
4029 /* Fault insertion support including random number generator shamelessly
4030  * stolen from kernel/rcutorture.c */
4031 struct fault_random_state {
4032         unsigned long state;
4033         unsigned long count;
4034 };
4035
4036 #define FAULT_RANDOM_MULT 39916801  /* prime */
4037 #define FAULT_RANDOM_ADD        479001701 /* prime */
4038 #define FAULT_RANDOM_REFRESH 10000
4039
4040 /*
4041  * Crude but fast random-number generator.  Uses a linear congruential
4042  * generator, with occasional help from get_random_bytes().
4043  */
4044 static unsigned long
4045 _drbd_fault_random(struct fault_random_state *rsp)
4046 {
4047         long refresh;
4048
4049         if (!rsp->count--) {
4050                 get_random_bytes(&refresh, sizeof(refresh));
4051                 rsp->state += refresh;
4052                 rsp->count = FAULT_RANDOM_REFRESH;
4053         }
4054         rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4055         return swahw32(rsp->state);
4056 }
4057
4058 static char *
4059 _drbd_fault_str(unsigned int type) {
4060         static char *_faults[] = {
4061                 [DRBD_FAULT_MD_WR] = "Meta-data write",
4062                 [DRBD_FAULT_MD_RD] = "Meta-data read",
4063                 [DRBD_FAULT_RS_WR] = "Resync write",
4064                 [DRBD_FAULT_RS_RD] = "Resync read",
4065                 [DRBD_FAULT_DT_WR] = "Data write",
4066                 [DRBD_FAULT_DT_RD] = "Data read",
4067                 [DRBD_FAULT_DT_RA] = "Data read ahead",
4068                 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
4069                 [DRBD_FAULT_AL_EE] = "EE allocation",
4070                 [DRBD_FAULT_RECEIVE] = "receive data corruption",
4071         };
4072
4073         return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4074 }
4075
4076 unsigned int
4077 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4078 {
4079         static struct fault_random_state rrs = {0, 0};
4080
4081         unsigned int ret = (
4082                 (fault_devs == 0 ||
4083                         ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4084                 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4085
4086         if (ret) {
4087                 fault_count++;
4088
4089                 if (__ratelimit(&drbd_ratelimit_state))
4090                         dev_warn(DEV, "***Simulating %s failure\n",
4091                                 _drbd_fault_str(type));
4092         }
4093
4094         return ret;
4095 }
4096 #endif
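
/*
 * Editor's note: as _drbd_insert_fault() shows, a fault is only injected when
 * the device's minor is selected by the fault_devs bitmask (0 selects all
 * devices) and a random draw falls within fault_rate percent.  A hypothetical
 * invocation, assuming the remaining fault-injection parameters enable the
 * desired fault type:
 *
 *	modprobe drbd fault_rate=10 fault_devs=1
 */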
4097
4098 const char *drbd_buildtag(void)
4099 {
4100         /* DRBD built from external sources carries a reference here to
4101            the git hash of the source code. */
4102
4103         static char buildtag[38] = "\0uilt-in";
4104
4105         if (buildtag[0] == 0) {
4106 #ifdef CONFIG_MODULES
4107                 if (THIS_MODULE != NULL)
4108                         sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4109                 else
4110 #endif
4111                         buildtag[0] = 'b';
4112         }
4113
4114         return buildtag;
4115 }
4116
4117 module_init(drbd_init)
4118 module_exit(drbd_cleanup)
4119
4120 EXPORT_SYMBOL(drbd_conn_str);
4121 EXPORT_SYMBOL(drbd_role_str);
4122 EXPORT_SYMBOL(drbd_disk_str);
4123 EXPORT_SYMBOL(drbd_set_st_err_str);