/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
struct after_state_chg_work {
	struct drbd_work w;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
};
static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(cn_idx, uint, 0444);
module_param(proc_details, int, 0644);
#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif
/* module parameters, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
unsigned int cn_idx = CN_IDX_DRBD;
int proc_details; /* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct drbd_conf **minor_table;
struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* epoch entries */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}
/**
 * DOC: The transfer log
 *
 * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular double linked list of requests
 * attached.
 */
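/*
 * Sketch of the intended life cycle (an illustration only, assuming the
 * request queueing side behaves as the function names below suggest):
 *
 *	tl_init(mdev);                          // one empty epoch, oldest == newest
 *	                                        // writes queue into newest_tle->requests
 *	tl_release(mdev, barrier_nr, set_size); // a P_BARRIER_ACK retires oldest_tle
 *	tl_clear(mdev);                         // connection loss: fail all epochs
 *	tl_cleanup(mdev);                       // tear down the single remaining epoch
 */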
static int tl_init(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	if (!b)
		return 0;
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->next = NULL;
	b->n_writes = 0;
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	mdev->oldest_tle = b;
	mdev->newest_tle = b;
	INIT_LIST_HEAD(&mdev->out_of_sequence_requests);

	mdev->tl_hash = NULL;

	return 1;
}
static void tl_cleanup(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
	kfree(mdev->oldest_tle);
	mdev->oldest_tle = NULL;
	kfree(mdev->unused_spare_tle);
	mdev->unused_spare_tle = NULL;
	kfree(mdev->tl_hash);
	mdev->tl_hash = NULL;
}
/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @mdev:	DRBD device.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
{
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	new->next = NULL;
	new->n_writes = 0;

	newest_before = mdev->newest_tle;
	/* never send a barrier number == 0, because that is special-cased
	 * when using TCQ for our write ordering code */
	new->br_number = (newest_before->br_number+1) ?: 1;
	if (mdev->newest_tle != new) {
		mdev->newest_tle->next = new;
		mdev->newest_tle = new;
	}
}
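/*
 * Note on the barrier number above: "(x + 1) ?: 1" is the GNU C conditional
 * with omitted middle operand, which yields the left-hand value unless that
 * value is zero.  The counter therefore increments normally and, on wrap to
 * 0, skips straight to 1, keeping barrier number 0 reserved as the comment
 * above requires.  Equivalent plain C would be:
 *
 *	new->br_number = newest_before->br_number + 1;
 *	if (new->br_number == 0)
 *		new->br_number = 1;
 */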
/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @mdev:	DRBD device.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch objects this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&mdev->req_lock);

	b = mdev->oldest_tle;

	/* first some paranoia code */
	if (b == NULL) {
		dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			barrier_nr);
		goto bail;
	}
	if (b->br_number != barrier_nr) {
		dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
			barrier_nr, b->br_number);
		goto bail;
	}
	if (b->n_writes != set_size) {
		dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			barrier_nr, set_size, b->n_writes);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, barrier_acked);
	}
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruptions of
	   slab's data structures we have to remove the lists head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mode(connection_lost_while_pending).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, barrier_acked) above.
	 */
	list_del_init(&b->requests);

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(mdev, b);
		if (nob)
			mdev->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore mdev->oldest_tle points already to b */
	} else {
		D_ASSERT(nob != NULL);
		mdev->oldest_tle = nob;
		kfree(b);
	}

	spin_unlock_irq(&mdev->req_lock);
	dec_ap_pending(mdev);

	return;

bail:
	spin_unlock_irq(&mdev->req_lock);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
}
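/*
 * Worked example of the paranoia checks above (illustrative numbers): if the
 * oldest epoch carries br_number == 42 and n_writes == 7, then only
 * tl_release(mdev, 42, 7) retires it.  A P_BARRIER_ACK carrying any other
 * barrier number or set size indicates a write-ordering violation and forces
 * the connection into C_PROTOCOL_ERROR via the bail path.
 */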
/* In C_AHEAD mode only out_of_sync packets are sent for requests. Detach
 * those requests from the newest barrier when changing to another cstate.
 *
 * That headless list vanishes when the last request has finished its write
 * or sent its out_of_sync packet. */
static void tl_forget(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;

	if (test_bit(CREATE_BARRIER, &mdev->flags))
		return;

	b = mdev->newest_tle;
	list_del(&b->requests);
	_tl_add_barrier(mdev, b);
}
/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @mdev:	DRBD device.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
 * restart_frozen_disk_io.
 */
static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = mdev->oldest_tle;
	pn = &mdev->oldest_tle;
	while (b) {
		n_writes = 0;
		n_reads = 0;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
			n_reads  += (rv & MR_READ)  >> MR_READ_SHIFT;
		}
		tmp = b->next;

		if (n_writes) {
			if (what == resend) {
				b->n_writes = n_writes;
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(mdev);
					set_bit(CREATE_BARRIER, &mdev->flags);
				}

				drbd_queue_work(&mdev->data.work, &b->w);
			}
			pn = &b->next;
		} else {
			if (n_reads)
				list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
			if (b->w.cb != NULL)
				dec_ap_pending(mdev);

			if (b == mdev->newest_tle) {
				/* recycle, but reinit! */
				D_ASSERT(tmp == NULL);
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->w.cb = NULL;
				b->br_number = net_random();
				b->n_writes = 0;

				*pn = b;
				break;
			}
			*pn = tmp;
			kfree(b);
		}
		b = tmp;
		list_splice(&carry_reads, &b->requests);
	}
}
/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @mdev:	DRBD device.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_conf *mdev)
{
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&mdev->req_lock);

	_tl_restart(mdev, connection_lost_while_pending);

	/* we expect this list to be empty. */
	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, connection_lost_while_pending);
	}

	/* ensure bit indicating barrier is required is clear */
	clear_bit(CREATE_BARRIER, &mdev->flags);

	memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));

	spin_unlock_irq(&mdev->req_lock);
}
void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
	spin_lock_irq(&mdev->req_lock);
	_tl_restart(mdev, what);
	spin_unlock_irq(&mdev->req_lock);
}
/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @mdev:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}
enum drbd_state_rv
drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
		  union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	rv = _drbd_set_state(mdev, ns, f, NULL);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(mdev, CS_HARD, mask, val);
}
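/*
 * How mask/val compose (a sketch; the NS()/_NS() macros used throughout this
 * file are assumed to expand to such a mask/val pair for a single field):
 * the new state is always built as
 *
 *	ns.i = (os.i & ~mask.i) | val.i;
 *
 * so e.g. drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)) rewrites only
 * the conn bits and carries every other field of the state union over
 * unchanged.
 */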
static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
						    union drbd_state,
						    union drbd_state);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, const char **warn_sync_abort);
int drbd_send_state_req(struct drbd_conf *,
			union drbd_state, union drbd_state);
static enum drbd_state_rv
_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
	     union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
		return SS_CW_FAILED_BY_PEER;

	rv = 0;
	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (!cl_wide_st_chg(mdev, os, ns))
		rv = SS_CW_NO_NEED;
	if (!rv) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS) {
			rv = is_valid_state_transition(mdev, ns, os);
			if (rv == SS_SUCCESS)
				rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	return rv;
}
/**
 * drbd_req_state() - Perform an eventually cluster wide state change
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
	       union drbd_state val, enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(&mdev->state_mutex);

	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (cl_wide_st_chg(mdev, os, ns)) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_state_transition(mdev, ns, os);
		spin_unlock_irqrestore(&mdev->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		drbd_state_lock(mdev);
		if (!drbd_send_state_req(mdev, mask, val)) {
			drbd_state_unlock(mdev);
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		wait_event(mdev->state_wait,
			(rv = _req_st_cond(mdev, mask, val)));

		if (rv < SS_SUCCESS) {
			drbd_state_unlock(mdev);
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&mdev->req_lock, flags);
		os = mdev->state;
		ns.i = (os.i & ~mask.i) | val.i;
		rv = _drbd_set_state(mdev, ns, f, &done);
		drbd_state_unlock(mdev);
	} else {
		rv = _drbd_set_state(mdev, ns, f, &done);
	}

	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(current != mdev->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(&mdev->state_mutex);

	return rv;
}
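/*
 * For a cluster wide change, the flow above is effectively a two phase
 * commit: validate locally, send P_STATE_CHG_REQ to the peer via
 * drbd_send_state_req(), sleep on state_wait until _req_st_cond() observes
 * CL_ST_CHG_SUCCESS or CL_ST_CHG_FAIL (presumably set when the peer's reply
 * arrives), and only then commit locally with _drbd_set_state().
 */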
/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	wait_event(mdev->state_wait,
		   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}
static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
	dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-'
	    );
}
void print_st_err(struct drbd_conf *mdev, union drbd_state os,
		  union drbd_state ns, enum drbd_state_rv err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(mdev, " state", os);
	print_st(mdev, "wanted", ns);
}
/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:	DRBD device.
 * @ns:		State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	enum drbd_state_rv rv = SS_SUCCESS;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		if (!mdev->net_conf->two_primaries &&
		    ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
			rv = SS_TWO_PRIMARIES;
		put_net_conf(mdev);
	}

	if (rv <= 0)
		/* already found a reason to abort */;
	else if (ns.role == R_SECONDARY && mdev->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (mdev->sync_conf.verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  mdev->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	return rv;
}
/**
 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @os:		old state.
 */
static enum drbd_state_rv
is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
			  union drbd_state os)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
		rv = SS_IN_TRANSIENT_STATE;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
	    && os.conn < C_WF_REPORT_PARAMS)
		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

	return rv;
}
/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @warn_sync_abort: set to a reason string if an ongoing resync/verify gets aborted.
 *
 * When we lose connection, we have to set the state of the peers disk (pdsk)
 * to D_UNKNOWN. This rule and many more along those lines are in this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, const char **warn_sync_abort)
{
	enum drbd_fencing_p fp;
	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Disallow Network errors to configure a device's network part */
	if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
	    os.conn <= C_DISCONNECTING)
		ns.conn = os.conn;

	/* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
	 * If you try to go into some Sync* state, that shall fail (elsewhere). */
	if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
	    ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
		ns.conn = os.conn;

	/* we cannot fail (again) if we already detached */
	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
		ns.disk = D_DISKLESS;

	/* if we are only D_ATTACHING yet,
	 * we can (and should) go directly to D_DISKLESS. */
	if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
		ns.disk = D_DISKLESS;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
		ns.conn = os.conn;

	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* Abort resync if a disk fails/detaches */
	if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
	    (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn_sync_abort)
			*warn_sync_abort =
				os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
				"Online-verify" : "Resync";
		ns.conn = C_CONNECTED;
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = mdev->new_state_tmp.disk;
			ns.pdsk = mdev->new_state_tmp.pdsk;
		} else {
			dev_alert(DEV, "Connection lost while negotiating, no data!\n");
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(mdev);
	}

	/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
	if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
		if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
			ns.disk = D_UP_TO_DATE;
		if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
			ns.pdsk = D_UP_TO_DATE;
	}

	/* Implications of the connection state on the disk states */
	disk_min = D_DISKLESS;
	disk_max = D_UP_TO_DATE;
	pdsk_min = D_INCONSISTENT;
	pdsk_max = D_UNKNOWN;
	switch ((enum drbd_conns)ns.conn) {
	case C_WF_BITMAP_T:
	case C_PAUSED_SYNC_T:
	case C_STARTING_SYNC_T:
	case C_WF_SYNC_UUID:
	case C_BEHIND:
		disk_min = D_INCONSISTENT;
		disk_max = D_OUTDATED;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_VERIFY_S:
	case C_VERIFY_T:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_CONNECTED:
		disk_min = D_DISKLESS;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_DISKLESS;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_WF_BITMAP_S:
	case C_PAUSED_SYNC_S:
	case C_STARTING_SYNC_S:
	case C_AHEAD:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
		break;
	case C_SYNC_TARGET:
		disk_min = D_INCONSISTENT;
		disk_max = D_INCONSISTENT;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_SYNC_SOURCE:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_INCONSISTENT;
		break;
	case C_STANDALONE:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_CONNECTION:
	case C_WF_REPORT_PARAMS:
	case C_MASK:
		break;
	}
	if (ns.disk > disk_max)
		ns.disk = disk_max;

	if (ns.disk < disk_min) {
		dev_warn(DEV, "Implicitly set disk from %s to %s\n",
			 drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
		ns.disk = disk_min;
	}
	if (ns.pdsk > pdsk_max)
		ns.pdsk = pdsk_max;

	if (ns.pdsk < pdsk_min) {
		dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
			 drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
		ns.pdsk = pdsk_min;
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
	    !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
	    !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
		ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}
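/*
 * Example of the clamping above: on the resync target side (C_SYNC_TARGET),
 * disk_min == disk_max == D_INCONSISTENT, so a requested ns.disk of
 * D_UP_TO_DATE is pulled down to D_INCONSISTENT (with the "Implicitly set
 * disk" warning), while ns.pdsk is forced into its own min..max window the
 * same way.
 */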
/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
	if (mdev->agreed_pro_version < 90)
		mdev->ov_start_sector = 0;
	mdev->rs_total = drbd_bm_bits(mdev);
	mdev->ov_position = 0;
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		mdev->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
		if (bit >= mdev->rs_total) {
			mdev->ov_start_sector =
				BM_BIT_TO_SECT(mdev->rs_total - 1);
			mdev->rs_total = 1;
		} else
			mdev->rs_total -= bit;
		mdev->ov_position = mdev->ov_start_sector;
	}
	mdev->ov_left = mdev->rs_total;
}
static void drbd_resume_al(struct drbd_conf *mdev)
{
	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
		dev_info(DEV, "Resumed AL updates\n");
}
/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @flags:	Flags
 * @done:	Optional completion, that will get completed after the after_state_ch() finished
 *
 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
 */
enum drbd_state_rv
__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
		 enum chg_state_flags flags, struct completion *done)
{
	union drbd_state os;
	enum drbd_state_rv rv = SS_SUCCESS;
	const char *warn_sync_abort = NULL;
	struct after_state_chg_work *ascw;

	os = mdev->state;

	ns = sanitize_state(mdev, os, ns, &warn_sync_abort);

	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	if (!(flags & CS_HARD)) {
		/* pre-state-change checks ; only look at ns */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(mdev, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen... */

			if (is_valid_state(mdev, os) == rv)
				rv = is_valid_state_transition(mdev, ns, os);
		} else
			rv = is_valid_state_transition(mdev, ns, os);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(mdev, os, ns, rv);
		return rv;
	}

	if (warn_sync_abort)
		dev_warn(DEV, "%s aborted.\n", warn_sync_abort);

	{
	char *pbp, pb[300];
	pbp = pb;
	*pbp = 0;
	if (ns.role != os.role)
		pbp += sprintf(pbp, "role( %s -> %s ) ",
			       drbd_role_str(os.role),
			       drbd_role_str(ns.role));
	if (ns.peer != os.peer)
		pbp += sprintf(pbp, "peer( %s -> %s ) ",
			       drbd_role_str(os.peer),
			       drbd_role_str(ns.peer));
	if (ns.conn != os.conn)
		pbp += sprintf(pbp, "conn( %s -> %s ) ",
			       drbd_conn_str(os.conn),
			       drbd_conn_str(ns.conn));
	if (ns.disk != os.disk)
		pbp += sprintf(pbp, "disk( %s -> %s ) ",
			       drbd_disk_str(os.disk),
			       drbd_disk_str(ns.disk));
	if (ns.pdsk != os.pdsk)
		pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
			       drbd_disk_str(os.pdsk),
			       drbd_disk_str(ns.pdsk));
	if (is_susp(ns) != is_susp(os))
		pbp += sprintf(pbp, "susp( %d -> %d ) ",
			       is_susp(os),
			       is_susp(ns));
	if (ns.aftr_isp != os.aftr_isp)
		pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
			       os.aftr_isp,
			       ns.aftr_isp);
	if (ns.peer_isp != os.peer_isp)
		pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
			       os.peer_isp,
			       ns.peer_isp);
	if (ns.user_isp != os.user_isp)
		pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
			       os.user_isp,
			       ns.user_isp);
	dev_info(DEV, "%s\n", pb);
	}

	/* solve the race between becoming unconfigured,
	 * worker doing the cleanup, and
	 * admin reconfiguring us:
	 * on (re)configure, first set CONFIG_PENDING,
	 * then wait for a potentially exiting worker,
	 * start the worker, and schedule one no_op.
	 * then proceed with configuration.
	 */
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY &&
	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
		set_bit(DEVICE_DYING, &mdev->flags);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
	 * drbd_ldev_destroy() won't happen before our corresponding
	 * after_state_ch works run, where we put_ldev again. */
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&mdev->local_cnt);

	mdev->state = ns;

	wake_up(&mdev->misc_wait);
	wake_up(&mdev->state_wait);

	/* aborted verify run. log the last position */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn < C_CONNECTED) {
		mdev->ov_start_sector =
			BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
		dev_info(DEV, "Online Verify reached sector %llu\n",
			(unsigned long long)mdev->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
		dev_info(DEV, "Syncer continues.\n");
		mdev->rs_paused += (long)jiffies
				  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);
	}

	if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		dev_info(DEV, "Resync suspended\n");
		mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		set_ov_position(mdev, ns.conn);
		mdev->rs_start = now;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->ov_last_oos_size = 0;
		mdev->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = mdev->ov_left;
			mdev->rs_mark_time[i] = now;
		}

		drbd_rs_controller_reset(mdev);

		if (ns.conn == C_VERIFY_S) {
			dev_info(DEV, "Starting Online Verify from sector %llu\n",
					(unsigned long long)mdev->ov_position);
			mod_timer(&mdev->resync_timer, jiffies);
		}
	}

	if (get_ldev(mdev)) {
		u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						   MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						   MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (mdev->state.role == R_PRIMARY ||
		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (mdev->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (mdev->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (mdev->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != mdev->ldev->md.flags) {
			mdev->ldev->md.flags = mdf;
			drbd_md_mark_dirty(mdev);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
		put_ldev(mdev);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &mdev->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_TEAR_DOWN &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&mdev->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
		drbd_resume_al(mdev);

	if (os.conn == C_AHEAD && ns.conn != C_AHEAD)
		tl_forget(mdev);

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->done = done;
		drbd_queue_work(&mdev->data.work, &ascw->w);
	} else {
		dev_warn(DEV, "Could not kmalloc an ascw\n");
	}

	return rv;
}
static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
	if (ascw->flags & CS_WAIT_COMPLETE) {
		D_ASSERT(ascw->done != NULL);
		complete(ascw->done);
	}
	kfree(ascw);

	return 1;
}
static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
	if (rv) {
		dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
		_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (mdev->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(mdev, C_SYNC_SOURCE);
		break;
	}
}
int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
{
	int rv;

	D_ASSERT(current == mdev->worker.task);

	/* open coded non-blocking drbd_suspend_io(mdev); */
	set_bit(SUSPEND_IO, &mdev->flags);
	if (!is_susp(mdev->state))
		D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

	drbd_bm_lock(mdev, why);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	drbd_resume_io(mdev);

	return rv;
}
/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @flags:	Flags
 */
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags)
{
	enum drbd_fencing_p fp;
	enum drbd_req_event what = nothing;
	union drbd_state nsm = (union drbd_state){ .i = -1 };

	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
		clear_bit(CRASHED_PRIMARY, &mdev->flags);
		if (mdev->p_uuid)
			mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
	}

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Inform userspace about the change... */
	drbd_bcast_state(mdev, ns);

	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		drbd_khelper(mdev, "pri-on-incon-degr");

	/* Here we have the actions that are performed after a
	   state change. This function might sleep */

	if (ns.susp_nod) {
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
			what = resend;

		if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
			what = restart_frozen_disk_io;

		if (what != nothing)
			nsm.susp_nod = 0;
	}

	if (ns.susp_fen) {
		/* case1: The outdate peer handler is successful: */
		if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
			tl_clear(mdev);
			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
				drbd_uuid_new_current(mdev);
				clear_bit(NEW_CUR_UUID, &mdev->flags);
			}
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
		/* case2: The connection was established again: */
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
			clear_bit(NEW_CUR_UUID, &mdev->flags);
			what = resend;
			nsm.susp_fen = 0;
		}
	}

	if (what != nothing) {
		spin_lock_irq(&mdev->req_lock);
		_tl_restart(mdev, what);
		nsm.i &= mdev->state.i;
		_drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);
	}

	/* Became sync source. With protocol >= 96, we still need to send out
	 * the sync uuid now. Need to do that before any drbd_send_state, or
	 * the other side may go "paused sync" before receiving the sync uuids,
	 * which is unexpected. */
	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
	    mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
		drbd_gen_and_send_sync_uuid(mdev);
		put_ldev(mdev);
	}

	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}
	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
		drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");

	/* Lost contact to peer's copy of the data */
	if ((os.pdsk >= D_INCONSISTENT &&
	     os.pdsk != D_UNKNOWN &&
	     os.pdsk != D_OUTDATED)
	&&  (ns.pdsk < D_INCONSISTENT ||
	     ns.pdsk == D_UNKNOWN ||
	     ns.pdsk == D_OUTDATED)) {
		if (get_ldev(mdev)) {
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (is_susp(mdev->state)) {
					set_bit(NEW_CUR_UUID, &mdev->flags);
				} else {
					drbd_uuid_new_current(mdev);
					drbd_send_uuids(mdev);
				}
			}
			put_ldev(mdev);
		}
	}

	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
			drbd_uuid_new_current(mdev);
			drbd_send_uuids(mdev);
		}

		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
			drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote diskless peer");
		put_ldev(mdev);
	}

	/* Write out all changed bits on demote.
	 * Though, no need to do that just yet
	 * if there is a resync going on still */
	if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
	    mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
		drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote");
		put_ldev(mdev);
	}

	/* Last part of the attaching process ... */
	if (ns.conn >= C_CONNECTED &&
	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	/* We want to pause/continue resync, tell peer. */
	if (ns.conn >= C_CONNECTED &&
	     ((os.aftr_isp != ns.aftr_isp) ||
	      (os.user_isp != ns.user_isp)))
		drbd_send_state(mdev);

	/* In case one of the isp bits got set, suspend other devices. */
	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
	    (ns.aftr_isp || ns.peer_isp || ns.user_isp))
		suspend_other_sg(mdev);

	/* Make sure the peer gets informed about eventual state
	   changes (ISP bits) while we were in WFReportParams. */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
		drbd_send_state(mdev);

	if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
		drbd_send_state(mdev);

	/* We are in the progress to start a full sync... */
	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");

	/* We are invalidating our self... */
	if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");

	/* first half of local IO error, failure to attach,
	 * or administrative detach */
	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
		enum drbd_io_error_p eh;
		int was_io_error;
		/* corresponding get_ldev was in __drbd_set_state, to serialize
		 * our cleanup here with the transition to D_DISKLESS,
		 * so it is safe to dereference ldev here. */
		eh = mdev->ldev->dc.on_io_error;
		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);

		/* current state still has to be D_FAILED,
		 * there is only one way out: to D_DISKLESS,
		 * and that may only happen after our put_ldev below. */
		if (mdev->state.disk != D_FAILED)
			dev_err(DEV,
				"ASSERT FAILED: disk is %s during detach\n",
				drbd_disk_str(mdev->state.disk));

		if (drbd_send_state(mdev))
			dev_warn(DEV, "Notified peer that I am detaching my disk\n");
		else
			dev_err(DEV, "Sending state for detaching disk failed\n");

		drbd_rs_cancel_all(mdev);

		/* In case we want to get something to stable storage still,
		 * this may be the last chance.
		 * Following put_ldev may transition to D_DISKLESS. */
		drbd_md_sync(mdev);
		put_ldev(mdev);

		if (was_io_error && eh == EP_CALL_HELPER)
			drbd_khelper(mdev, "local-io-error");
	}

	/* second half of local IO error, failure to attach,
	 * or administrative detach,
	 * after local_cnt references have reached zero again */
	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
		/* We must still be diskless,
		 * re-attach has to be serialized with this! */
		if (mdev->state.disk != D_DISKLESS)
			dev_err(DEV,
				"ASSERT FAILED: disk is %s while going diskless\n",
				drbd_disk_str(mdev->state.disk));

		mdev->rs_total = 0;
		mdev->rs_failed = 0;
		atomic_set(&mdev->rs_pending_cnt, 0);

		if (drbd_send_state(mdev))
			dev_warn(DEV, "Notified peer that I'm now diskless.\n");
		else
			dev_err(DEV, "Sending state for being diskless failed\n");
		/* corresponding get_ldev in __drbd_set_state
		 * this may finally trigger drbd_ldev_destroy. */
		put_ldev(mdev);
	}

	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(mdev);
	}

	/* A resync finished or aborted, wake paused devices... */
	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
	    (os.peer_isp && !ns.peer_isp) ||
	    (os.user_isp && !ns.user_isp))
		resume_next_sg(mdev);

	/* sync target done with resync. Explicitly notify peer, even though
	 * it should (at least for non-empty resyncs) already know itself. */
	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
		drbd_send_state(mdev);

	if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED)
		drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");

	/* free tl_hash if we Got thawed and are C_STANDALONE */
	if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
		drbd_free_tl_hash(mdev);

	/* Upon network connection, we need to start the receiver */
	if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
		drbd_thread_start(&mdev->receiver);

	/* Terminate worker thread if we are unconfigured - it will be
	   restarted as needed... */
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(mdev);
		/* set in __drbd_set_state, unless CONFIG_PENDING was set */
		if (test_bit(DEVICE_DYING, &mdev->flags))
			drbd_thread_stop_nowait(&mdev->worker);
	}
}
static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_conf *mdev = thi->mdev;
	unsigned long flags;
	int retval;

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "Exiting", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "Restarting" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees Exiting, and can remap to Restarting,
	 * or thread_start sees None, and can proceed as normal.
	 */

	if (thi->t_state == Restarting) {
		dev_info(DEV, "Restarting %s\n", current->comm);
		thi->t_state = Running;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = None;
	complete(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	dev_info(DEV, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */
	module_put(THIS_MODULE);
	return retval;
}
static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *))
{
	spin_lock_init(&thi->t_lock);
	thi->task = NULL;
	thi->t_state = None;
	thi->function = func;
	thi->mdev = mdev;
}
int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct task_struct *nt;
	unsigned long flags;

	const char *me =
		thi == &mdev->receiver ? "receiver" :
		thi == &mdev->asender  ? "asender"  :
		thi == &mdev->worker   ? "worker"   : "NONSENSE";

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case None:
		dev_info(DEV, "Starting %s thread (from %s [%d])\n",
				me, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		init_completion(&thi->stop);
		D_ASSERT(thi->task == NULL);
		thi->reset_cpu_mask = 1;
		thi->t_state = Running;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd%d_%s", mdev_to_minor(mdev), me);

		if (IS_ERR(nt)) {
			dev_err(DEV, "Couldn't start thread\n");

			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = Running;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case Exiting:
		thi->t_state = Restarting;
		dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
				me, current->comm, current->pid);
		/* fall through */
	case Running:
	case Restarting:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}
void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? Restarting : Exiting;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == None) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @mdev:	DRBD device.
 *
 * Forces all threads of a device onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_conf *mdev)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(mdev->cpu_mask))
		return;

	ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, mdev->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(mdev->cpu_mask);
}
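/*
 * Example of the mapping above: with 4 online CPUs, minors 0..7 land on
 * CPUs 0, 1, 2, 3, 0, 1, 2, 3 -- each device gets exactly one CPU in its
 * mask, so all three of its threads end up co-scheduled there unless the
 * user configured an explicit cpu_mask.
 */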
/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @mdev:	DRBD device.
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
{
	struct task_struct *p = current;
	struct drbd_thread *thi =
		p == mdev->asender.task  ? &mdev->asender  :
		p == mdev->receiver.task ? &mdev->receiver :
		p == mdev->worker.task   ? &mdev->worker   :
		NULL;
	ERR_IF(thi == NULL)
		return;
	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, mdev->cpu_mask);
}
/* the appropriate socket mutex must be held already */
int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
		   enum drbd_packets cmd, struct p_header80 *h,
		   size_t size, unsigned msg_flags)
{
	int sent, ok;

	ERR_IF(!h) return false;
	ERR_IF(!size) return false;

	h->magic   = BE_DRBD_MAGIC;
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size-sizeof(struct p_header80));

	sent = drbd_send(mdev, sock, h, size, msg_flags);

	ok = (sent == size);
	if (!ok)
		dev_err(DEV, "short sent %s size=%d sent=%d\n",
			cmdname(cmd), (int)size, sent);
	return ok;
}
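/*
 * On the wire, every packet sent this way starts with the big-endian
 * p_header80 filled in above; a sketch of the layout, assuming the struct
 * is packed as its use here implies:
 *
 *	u32 magic;	// BE_DRBD_MAGIC
 *	u16 command;	// enum drbd_packets
 *	u16 length;	// payload bytes following the header
 */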
/* don't pass the socket. we may only look at it
 * when we hold the appropriate socket mutex.
 */
int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
		  enum drbd_packets cmd, struct p_header80 *h, size_t size)
{
	int ok = 0;
	struct socket *sock;

	if (use_data_socket) {
		mutex_lock(&mdev->data.mutex);
		sock = mdev->data.socket;
	} else {
		mutex_lock(&mdev->meta.mutex);
		sock = mdev->meta.socket;
	}

	/* drbd_disconnect() could have called drbd_free_sock()
	 * while we were waiting in down()... */
	if (likely(sock != NULL))
		ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);

	if (use_data_socket)
		mutex_unlock(&mdev->data.mutex);
	else
		mutex_unlock(&mdev->meta.mutex);
	return ok;
}
int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
		   size_t size)
{
	struct p_header80 h;
	int ok;

	h.magic   = BE_DRBD_MAGIC;
	h.command = cpu_to_be16(cmd);
	h.length  = cpu_to_be16(size);

	if (!drbd_get_data_sock(mdev))
		return 0;

	ok = (sizeof(h) ==
		drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
	ok = ok && (size ==
		drbd_send(mdev, mdev->data.socket, data, size, 0));

	drbd_put_data_sock(mdev);

	return ok;
}
int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
{
	struct p_rs_param_95 *p;
	struct socket *sock;
	int size, rv;
	const int apv = mdev->agreed_pro_version;

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(mdev->sync_conf.verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	/* used from admin command context and receiver/worker context.
	 * to avoid kmalloc, grab the socket right here,
	 * then use the pre-allocated sbuf there */
	mutex_lock(&mdev->data.mutex);
	sock = mdev->data.socket;

	if (likely(sock != NULL)) {
		enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

		p = &mdev->data.sbuf.rs_param_95;

		/* initialize verify_alg and csums_alg */
		memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

		p->rate = cpu_to_be32(sc->rate);
		p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(sc->c_delay_target);
		p->c_fill_target = cpu_to_be32(sc->c_fill_target);
		p->c_max_rate = cpu_to_be32(sc->c_max_rate);

		if (apv >= 88)
			strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
		if (apv >= 89)
			strcpy(p->csums_alg, mdev->sync_conf.csums_alg);

		rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
	} else
		rv = 0; /* not ok */

	mutex_unlock(&mdev->data.mutex);

	return rv;
}
int drbd_send_protocol(struct drbd_conf *mdev)
{
	struct p_protocol *p;
	int size, cf, rv;

	size = sizeof(struct p_protocol);

	if (mdev->agreed_pro_version >= 87)
		size += strlen(mdev->net_conf->integrity_alg) + 1;

	/* we must not recurse into our own queue,
	 * as that is blocked during handshake */
	p = kmalloc(size, GFP_NOIO);
	if (p == NULL)
		return 0;

	p->protocol      = cpu_to_be32(mdev->net_conf->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(mdev->net_conf->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(mdev->net_conf->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(mdev->net_conf->after_sb_2p);
	p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);

	cf = 0;
	if (mdev->net_conf->want_lose)
		cf |= CF_WANT_LOSE;
	if (mdev->net_conf->dry_run) {
		if (mdev->agreed_pro_version >= 92)
			cf |= CF_DRY_RUN;
		else {
			dev_err(DEV, "--dry-run is not supported by peer");
			kfree(p);
			return -1;
		}
	}
	p->conn_flags    = cpu_to_be32(cf);

	if (mdev->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);

	rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
			   (struct p_header80 *)p, size);
	kfree(p);
	return rv;
}
int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct p_uuids p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 1;

	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);

	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
			     (struct p_header80 *)&p, sizeof(p));
}
int drbd_send_uuids(struct drbd_conf *mdev)
{
        return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
        return _drbd_send_uuids(mdev, 8);
}
int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
        struct p_rs_uuid p;
        u64 uuid;

        D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

        uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
        drbd_uuid_set(mdev, UI_BITMAP, uuid);
        drbd_md_sync(mdev);
        p.uuid = cpu_to_be64(uuid);

        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
                             (struct p_header80 *)&p, sizeof(p));
}
int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
        struct p_sizes p;
        sector_t d_size, u_size;
        int q_order_type;
        int ok;

        if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
                D_ASSERT(mdev->ldev->backing_bdev);
                d_size = drbd_get_max_capacity(mdev->ldev);
                u_size = mdev->ldev->dc.disk_size;
                q_order_type = drbd_queue_order_type(mdev);
                put_ldev(mdev);
        } else {
                d_size = 0;
                u_size = 0;
                q_order_type = QUEUE_ORDERED_NONE;
        }

        p.d_size = cpu_to_be64(d_size);
        p.u_size = cpu_to_be64(u_size);
        p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
        p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9);
        p.queue_order_type = cpu_to_be16(q_order_type);
        p.dds_flags = cpu_to_be16(flags);

        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
                           (struct p_header80 *)&p, sizeof(p));
        return ok;
}
/**
 * drbd_send_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_state(struct drbd_conf *mdev)
{
        struct socket *sock;
        struct p_state p;
        int ok = 0;

        /* Grab state lock so we won't send state if we're in the middle
         * of a cluster wide state change on another thread */
        drbd_state_lock(mdev);

        mutex_lock(&mdev->data.mutex);

        p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
        sock = mdev->data.socket;

        if (likely(sock != NULL)) {
                ok = _drbd_send_cmd(mdev, sock, P_STATE,
                                    (struct p_header80 *)&p, sizeof(p), 0);
        }

        mutex_unlock(&mdev->data.mutex);

        drbd_state_unlock(mdev);
        return ok;
}
int drbd_send_state_req(struct drbd_conf *mdev,
        union drbd_state mask, union drbd_state val)
{
        struct p_req_state p;

        p.mask = cpu_to_be32(mask.i);
        p.val  = cpu_to_be32(val.i);

        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
                             (struct p_header80 *)&p, sizeof(p));
}
int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
        struct p_req_state_reply p;

        p.retcode = cpu_to_be32(retcode);

        return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
                             (struct p_header80 *)&p, sizeof(p));
}
int fill_bitmap_rle_bits(struct drbd_conf *mdev,
        struct p_compressed_bm *p,
        struct bm_xfer_ctx *c)
{
        struct bitstream bs;
        unsigned long plain_bits;
        unsigned long tmp;
        unsigned long rl;
        unsigned len;
        unsigned toggle;
        int bits;

        /* may we use this feature? */
        if ((mdev->sync_conf.use_rle == 0) ||
                (mdev->agreed_pro_version < 90))
                return 0;

        if (c->bit_offset >= c->bm_bits)
                return 0; /* nothing to do. */

        /* use at most thus many bytes */
        bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
        memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
        /* plain bits covered in this code string */
        plain_bits = 0;

        /* p->encoding & 0x80 stores whether the first run length is set.
         * bit offset is implicit.
         * start with toggle == 2 to be able to tell the first iteration */
        toggle = 2;

        /* see how much plain bits we can stuff into one packet
         * using RLE and VLI. */
        do {
                tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
                                    : _drbd_bm_find_next(mdev, c->bit_offset);
                if (tmp == -1UL)
                        tmp = c->bm_bits;
                rl = tmp - c->bit_offset;

                if (toggle == 2) { /* first iteration */
                        if (rl == 0) {
                                /* the first checked bit was set,
                                 * store start value, */
                                DCBP_set_start(p, 1);
                                /* but skip encoding of zero run length */
                                toggle = !toggle;
                                continue;
                        }
                        DCBP_set_start(p, 0);
                }

                /* paranoia: catch zero runlength.
                 * can only happen if bitmap is modified while we scan it. */
                if (rl == 0) {
                        dev_err(DEV, "unexpected zero runlength while encoding bitmap "
                            "t:%u bo:%lu\n", toggle, c->bit_offset);
                        return -1;
                }

                bits = vli_encode_bits(&bs, rl);
                if (bits == -ENOBUFS) /* buffer full */
                        break;
                if (bits <= 0) {
                        dev_err(DEV, "error while encoding bitmap: %d\n", bits);
                        return 0;
                }

                toggle = !toggle;
                plain_bits += rl;
                c->bit_offset = tmp;
        } while (c->bit_offset < c->bm_bits);

        len = bs.cur.b - p->code + !!bs.cur.bit;

        if (plain_bits < (len << 3)) {
                /* incompressible with this method.
                 * we need to rewind both word and bit position. */
                c->bit_offset -= plain_bits;
                bm_xfer_ctx_bit_to_word_offset(c);
                c->bit_offset = c->word_offset * BITS_PER_LONG;
                return 0;
        }

        /* RLE + VLI was able to compress it just fine.
         * update c->word_offset. */
        bm_xfer_ctx_bit_to_word_offset(c);

        /* store pad_bits */
        DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

        return len;
}
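/* Illustrative walk-through of the encoding above: a bitmap chunk that
 * starts 0000 1111 11... is announced with DCBP_set_start(p, 0) (the first
 * checked bit is clear) and then sent as the run lengths 4, 6, ...; each
 * run length is VLI encoded into the bitstream, so the long runs of
 * in-sync blocks typical for an almost-synced device compress to a few
 * bytes. */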
/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev,
                         struct p_header80 *h, struct bm_xfer_ctx *c)
{
        struct p_compressed_bm *p = (void *)h;
        unsigned long num_words;
        int len;
        int ok;

        len = fill_bitmap_rle_bits(mdev, p, c);

        if (len < 0)
                return -EIO;

        if (len) {
                DCBP_set_code(p, RLE_VLI_Bits);
                ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
                        sizeof(*p) + len, 0);

                c->packets[0]++;
                c->bytes[0] += sizeof(*p) + len;

                if (c->bit_offset >= c->bm_bits)
                        len = 0; /* DONE */
        } else {
                /* was not compressible.
                 * send a buffer full of plain text bits instead. */
                num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
                len = num_words * sizeof(long);
                if (len)
                        drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long *)h->payload);
                ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
                                   h, sizeof(struct p_header80) + len, 0);
                c->word_offset += num_words;
                c->bit_offset = c->word_offset * BITS_PER_LONG;

                c->packets[1]++;
                c->bytes[1] += sizeof(struct p_header80) + len;

                if (c->bit_offset > c->bm_bits)
                        c->bit_offset = c->bm_bits;
        }
        if (ok) {
                if (len == 0) {
                        INFO_bm_xfer_stats(mdev, "send", c);
                        return 0;
                } else
                        return 1;
        }
        return -EIO;
}
/* See the comment at receive_bitmap() */
int _drbd_send_bitmap(struct drbd_conf *mdev)
{
        struct bm_xfer_ctx c;
        struct p_header80 *p;
        int err;

        ERR_IF(!mdev->bitmap) return false;

        /* maybe we should use some per thread scratch page,
         * and allocate that during initial device creation? */
        p = (struct p_header80 *) __get_free_page(GFP_NOIO);
        if (!p) {
                dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
                return false;
        }

        if (get_ldev(mdev)) {
                if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
                        dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
                        drbd_bm_set_all(mdev);
                        if (drbd_bm_write(mdev)) {
                                /* write_bm did fail! Leave full sync flag set in Meta P_DATA
                                 * but otherwise process as per normal - need to tell other
                                 * side that a full resync is required! */
                                dev_err(DEV, "Failed to write bitmap to disk!\n");
                        } else {
                                drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
                                drbd_md_sync(mdev);
                        }
                }
                put_ldev(mdev);
        }

        c = (struct bm_xfer_ctx) {
                .bm_bits = drbd_bm_bits(mdev),
                .bm_words = drbd_bm_words(mdev),
        };

        do {
                err = send_bitmap_rle_or_plain(mdev, p, &c);
        } while (err > 0);

        free_page((unsigned long) p);
        return err == 0;
}
int drbd_send_bitmap(struct drbd_conf *mdev)
{
        int err;

        if (!drbd_get_data_sock(mdev))
                return -1;
        err = !_drbd_send_bitmap(mdev);
        drbd_put_data_sock(mdev);
        return err;
}
int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
{
        int ok;
        struct p_barrier_ack p;

        p.barrier  = barrier_nr;
        p.set_size = cpu_to_be32(set_size);

        if (mdev->state.conn < C_CONNECTED)
                return false;
        ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
                        (struct p_header80 *)&p, sizeof(p));
        return ok;
}
/**
 * _drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in byte, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
                          u64 sector,
                          u32 blksize,
                          u64 block_id)
{
        int ok;
        struct p_block_ack p;

        p.sector   = sector;
        p.block_id = block_id;
        p.blksize  = blksize;
        p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));

        if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
                return false;
        ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
                                (struct p_header80 *)&p, sizeof(p));
        return ok;
}
/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
                     struct p_data *dp, int data_size)
{
        data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
                crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
        return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
                              dp->block_id);
}
int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
                     struct p_block_req *rp)
{
        return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
 * drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 * @e:		Epoch entry.
 */
int drbd_send_ack(struct drbd_conf *mdev,
        enum drbd_packets cmd, struct drbd_epoch_entry *e)
{
        return _drbd_send_ack(mdev, cmd,
                              cpu_to_be64(e->sector),
                              cpu_to_be32(e->size),
                              e->block_id);
}
/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
                     sector_t sector, int blksize, u64 block_id)
{
        return _drbd_send_ack(mdev, cmd,
                              cpu_to_be64(sector),
                              cpu_to_be32(blksize),
                              cpu_to_be64(block_id));
}
int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
                       sector_t sector, int size, u64 block_id)
{
        int ok;
        struct p_block_req p;

        p.sector   = cpu_to_be64(sector);
        p.block_id = block_id;
        p.blksize  = cpu_to_be32(size);

        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
                                (struct p_header80 *)&p, sizeof(p));
        return ok;
}
int drbd_send_drequest_csum(struct drbd_conf *mdev,
                            sector_t sector, int size,
                            void *digest, int digest_size,
                            enum drbd_packets cmd)
{
        int ok;
        struct p_block_req p;

        p.sector   = cpu_to_be64(sector);
        p.block_id = BE_DRBD_MAGIC + 0xbeef;
        p.blksize  = cpu_to_be32(size);

        p.head.magic   = BE_DRBD_MAGIC;
        p.head.command = cpu_to_be16(cmd);
        p.head.length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);

        mutex_lock(&mdev->data.mutex);

        ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
        ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));

        mutex_unlock(&mdev->data.mutex);

        return ok;
}
int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
        int ok;
        struct p_block_req p;

        p.sector   = cpu_to_be64(sector);
        p.block_id = BE_DRBD_MAGIC + 0xbabe;
        p.blksize  = cpu_to_be32(size);

        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
                           (struct p_header80 *)&p, sizeof(p));
        return ok;
}
/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
{
        int drop_it;
        /* long elapsed = (long)(jiffies - mdev->last_received); */

        drop_it =   mdev->meta.socket == sock
                || !mdev->asender.task
                || get_t_state(&mdev->asender) != Running
                || mdev->state.conn < C_CONNECTED;

        if (drop_it)
                return true;

        drop_it = !--mdev->ko_count;
        if (!drop_it) {
                dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
                       current->comm, current->pid, mdev->ko_count);
                request_ping(mdev);
        }

        return drop_it; /* && (mdev->state == R_PRIMARY) */;
}
/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
                   int offset, size_t size, unsigned msg_flags)
{
        int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
        kunmap(page);
        if (sent == size)
                mdev->send_cnt += size >> 9;
        return sent == size;
}
static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
                    int offset, size_t size, unsigned msg_flags)
{
        mm_segment_t oldfs = get_fs();
        int sent, ok;
        int len = size;

        /* e.g. XFS meta- & log-data is in slab pages, which have a
         * page_count of 0 and/or have PageSlab() set.
         * we cannot use send_page for those, as that does get_page();
         * put_page(); and would cause either a VM_BUG directly, or
         * __page_cache_release a page that would actually still be referenced
         * by someone, leading to some obscure delayed Oops somewhere else. */
        if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
                return _drbd_no_send_page(mdev, page, offset, size, msg_flags);

        msg_flags |= MSG_NOSIGNAL;
        drbd_update_congested(mdev);
        set_fs(KERNEL_DS);
        do {
                sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
                                                        offset, len,
                                                        msg_flags);
                if (sent == -EAGAIN) {
                        if (we_should_drop_the_connection(mdev,
                                                          mdev->data.socket))
                                break;
                        else
                                continue;
                }
                if (sent <= 0) {
                        dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
                             __func__, (int)size, len, sent);
                        break;
                }
                len    -= sent;
                offset += sent;
        } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
        set_fs(oldfs);
        clear_bit(NET_CONGESTED, &mdev->flags);

        ok = (len == 0);
        if (likely(ok))
                mdev->send_cnt += size >> 9;
        return ok;
}
static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        /* hint all but last page with MSG_MORE */
        __bio_for_each_segment(bvec, bio, i, 0) {
                if (!_drbd_no_send_page(mdev, bvec->bv_page,
                                     bvec->bv_offset, bvec->bv_len,
                                     i == bio->bi_vcnt - 1 ? 0 : MSG_MORE))
                        return 0;
        }
        return 1;
}
static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        /* hint all but last page with MSG_MORE */
        __bio_for_each_segment(bvec, bio, i, 0) {
                if (!_drbd_send_page(mdev, bvec->bv_page,
                                     bvec->bv_offset, bvec->bv_len,
                                     i == bio->bi_vcnt - 1 ? 0 : MSG_MORE))
                        return 0;
        }
        return 1;
}
static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
        struct page *page = e->pages;
        unsigned len = e->size;

        /* hint all but last page with MSG_MORE */
        page_chain_for_each(page) {
                unsigned l = min_t(unsigned, len, PAGE_SIZE);
                if (!_drbd_send_page(mdev, page, 0, l,
                                page_chain_next(page) ? MSG_MORE : 0))
                        return 0;
                len -= l;
        }
        return 1;
}
static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{
        if (mdev->agreed_pro_version >= 95)
                return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
                        (bi_rw & REQ_FUA ? DP_FUA : 0) |
                        (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
                        (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
        else
                return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
}
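/* Example of the mapping above (illustrative): against a peer with
 * agreed_pro_version >= 95, a bio carrying REQ_FLUSH|REQ_FUA goes out as
 * DP_FLUSH|DP_FUA, so the receiving side can reproduce the ordering and
 * durability semantics; older peers only ever learn about REQ_SYNC, since
 * their wire protocol cannot express FUA/FLUSH/DISCARD. */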
/* Used to send write requests
 * R_PRIMARY -> Peer	(P_DATA)
 */
int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
{
        int ok = 1;
        struct p_data p;
        unsigned int dp_flags = 0;
        void *dgb;
        int dgs;

        if (!drbd_get_data_sock(mdev))
                return 0;

        dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
                crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;

        if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
                p.head.h80.magic   = BE_DRBD_MAGIC;
                p.head.h80.command = cpu_to_be16(P_DATA);
                p.head.h80.length  =
                        cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
        } else {
                p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
                p.head.h95.command = cpu_to_be16(P_DATA);
                p.head.h95.length  =
                        cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
        }

        p.sector   = cpu_to_be64(req->sector);
        p.block_id = (unsigned long)req;
        p.seq_num  = cpu_to_be32(req->seq_num =
                                 atomic_add_return(1, &mdev->packet_seq));

        dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);

        if (mdev->state.conn >= C_SYNC_SOURCE &&
            mdev->state.conn <= C_PAUSED_SYNC_T)
                dp_flags |= DP_MAY_SET_IN_SYNC;

        p.dp_flags = cpu_to_be32(dp_flags);
        set_bit(UNPLUG_REMOTE, &mdev->flags);
        ok = (sizeof(p) ==
                drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
        if (ok && dgs) {
                dgb = mdev->int_dig_out;
                drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
                ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
        }
        if (ok) {
                /* For protocol A, we have to memcpy the payload into
                 * socket buffers, as we may complete right away
                 * as soon as we handed it over to tcp, at which point the data
                 * pages may become invalid.
                 *
                 * For data-integrity enabled, we copy it as well, so we can be
                 * sure that even if the bio pages may still be modified, it
                 * won't change the data on the wire, thus if the digest checks
                 * out ok after sending on this side, but does not fit on the
                 * receiving side, we sure have detected corruption elsewhere.
                 */
                if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
                        ok = _drbd_send_bio(mdev, req->master_bio);
                else
                        ok = _drbd_send_zc_bio(mdev, req->master_bio);

                /* double check digest, sometimes buffers have been modified in flight. */
                if (dgs > 0 && dgs <= 64) {
                        /* 64 byte, 512 bit, is the largest digest size
                         * currently supported in kernel crypto. */
                        unsigned char digest[64];
                        drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
                        if (memcmp(mdev->int_dig_out, digest, dgs)) {
                                dev_warn(DEV,
                                        "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
                                        (unsigned long long)req->sector, req->size);
                        }
                } /* else if (dgs > 64) {
                     ... Be noisy about digest too large ...
                } */
        }

        drbd_put_data_sock(mdev);

        return ok;
}
/* answer packet, used to send data back for read requests:
 *  Peer          -> (diskless) R_PRIMARY  (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
                    struct drbd_epoch_entry *e)
{
        int ok;
        struct p_data p;
        void *dgb;
        int dgs;

        dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
                crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;

        if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
                p.head.h80.magic   = BE_DRBD_MAGIC;
                p.head.h80.command = cpu_to_be16(cmd);
                p.head.h80.length  =
                        cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
        } else {
                p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
                p.head.h95.command = cpu_to_be16(cmd);
                p.head.h95.length  =
                        cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
        }

        p.sector   = cpu_to_be64(e->sector);
        p.block_id = e->block_id;
        /* p.seq_num  = 0;    No sequence numbers here.. */

        /* Only called by our kernel thread.
         * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
         * in response to admin command or module unload.
         */
        if (!drbd_get_data_sock(mdev))
                return 0;

        ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
        if (ok && dgs) {
                dgb = mdev->int_dig_out;
                drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
                ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
        }
        if (ok)
                ok = _drbd_send_zc_ee(mdev, e);

        drbd_put_data_sock(mdev);

        return ok;
}
int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
{
        struct p_block_desc p;

        p.sector  = cpu_to_be64(req->sector);
        p.blksize = cpu_to_be32(req->size);

        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
}
/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

                    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
                                              and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_conf *mdev, struct socket *sock,
              void *buf, size_t size, unsigned msg_flags)
{
        struct kvec iov;
        struct msghdr msg;
        int rv, sent = 0;

        if (!sock)
                return -1000;

        /* THINK  if (signal_pending) return ... ? */

        iov.iov_base = buf;
        iov.iov_len  = size;

        msg.msg_name       = NULL;
        msg.msg_namelen    = 0;
        msg.msg_control    = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags      = msg_flags | MSG_NOSIGNAL;

        if (sock == mdev->data.socket) {
                mdev->ko_count = mdev->net_conf->ko_count;
                drbd_update_congested(mdev);
        }
        do {
                /* STRANGE
                 * tcp_sendmsg does _not_ use its size parameter at all ?
                 *
                 * -EAGAIN on timeout, -EINTR on signal.
                 */
                /* THINK
                 * do we need to block DRBD_SIG if sock == &meta.socket ??
                 * otherwise wake_asender() might interrupt some send_*Ack !
                 */
                rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
                if (rv == -EAGAIN) {
                        if (we_should_drop_the_connection(mdev, sock))
                                break;
                        else
                                continue;
                }
                D_ASSERT(rv != 0);
                if (rv == -EINTR) {
                        flush_signals(current);
                        rv = 0;
                }
                if (rv < 0)
                        break;
                sent += rv;
                iov.iov_base += rv;
                iov.iov_len  -= rv;
        } while (sent < size);

        if (sock == mdev->data.socket)
                clear_bit(NET_CONGESTED, &mdev->flags);

        if (rv <= 0) {
                if (rv != -EAGAIN) {
                        dev_err(DEV, "%s_sendmsg returned %d\n",
                            sock == mdev->meta.socket ? "msock" : "sock",
                            rv);
                        drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
                } else
                        drbd_force_state(mdev, NS(conn, C_TIMEOUT));
        }

        return sent;
}
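/* Behavioral sketch of the send loop above (illustrative): kernel_sendmsg()
 * returning -EAGAIN means the send timed out (conf.timeout / 2).  On the
 * data socket each timeout decrements ko_count and has the asender ping
 * the peer; only when ko_count is exhausted (or the asender is gone) do we
 * give up and force C_TIMEOUT, while any other error forces C_BROKEN_PIPE. */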
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
        struct drbd_conf *mdev = bdev->bd_disk->private_data;
        unsigned long flags;
        int rv = 0;

        mutex_lock(&drbd_main_mutex);
        spin_lock_irqsave(&mdev->req_lock, flags);
        /* to have a stable mdev->state.role
         * and no race with updating open_cnt */

        if (mdev->state.role != R_PRIMARY) {
                if (mode & FMODE_WRITE)
                        rv = -EROFS;
                else if (!allow_oos)
                        rv = -EMEDIUMTYPE;
        }

        if (!rv)
                mdev->open_cnt++;
        spin_unlock_irqrestore(&mdev->req_lock, flags);
        mutex_unlock(&drbd_main_mutex);

        return rv;
}
static int drbd_release(struct gendisk *gd, fmode_t mode)
{
        struct drbd_conf *mdev = gd->private_data;
        mutex_lock(&drbd_main_mutex);
        mdev->open_cnt--;
        mutex_unlock(&drbd_main_mutex);
        return 0;
}
static void drbd_set_defaults(struct drbd_conf *mdev)
{
        /* This way we get a compile error when sync_conf grows,
           and we forgot to initialize it here */
        mdev->sync_conf = (struct syncer_conf) {
                /* .rate = */           DRBD_RATE_DEF,
                /* .after = */          DRBD_AFTER_DEF,
                /* .al_extents = */     DRBD_AL_EXTENTS_DEF,
                /* .verify_alg = */     {}, 0,
                /* .cpu_mask = */       {}, 0,
                /* .csums_alg = */      {}, 0,
                /* .use_rle = */        0,
                /* .on_no_data = */     DRBD_ON_NO_DATA_DEF,
                /* .c_plan_ahead = */   DRBD_C_PLAN_AHEAD_DEF,
                /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
                /* .c_fill_target = */  DRBD_C_FILL_TARGET_DEF,
                /* .c_max_rate = */     DRBD_C_MAX_RATE_DEF,
                /* .c_min_rate = */     DRBD_C_MIN_RATE_DEF
        };

        /* Have to use that way, because the layout differs between
           big endian and little endian */
        mdev->state = (union drbd_state) {
                { .role = R_SECONDARY,
                  .peer = R_UNKNOWN,
                  .conn = C_STANDALONE,
                  .disk = D_DISKLESS,
                  .pdsk = D_UNKNOWN,
                  .susp = 0,
                  .susp_nod = 0,
                  .susp_fen = 0
                } };
}
void drbd_init_set_defaults(struct drbd_conf *mdev)
{
        /* the memset(,0,) did most of this.
         * note: only assignments, no allocation in here */

        drbd_set_defaults(mdev);

        atomic_set(&mdev->ap_bio_cnt, 0);
        atomic_set(&mdev->ap_pending_cnt, 0);
        atomic_set(&mdev->rs_pending_cnt, 0);
        atomic_set(&mdev->unacked_cnt, 0);
        atomic_set(&mdev->local_cnt, 0);
        atomic_set(&mdev->net_cnt, 0);
        atomic_set(&mdev->packet_seq, 0);
        atomic_set(&mdev->pp_in_use, 0);
        atomic_set(&mdev->pp_in_use_by_net, 0);
        atomic_set(&mdev->rs_sect_in, 0);
        atomic_set(&mdev->rs_sect_ev, 0);
        atomic_set(&mdev->ap_in_flight, 0);

        mutex_init(&mdev->md_io_mutex);
        mutex_init(&mdev->data.mutex);
        mutex_init(&mdev->meta.mutex);
        sema_init(&mdev->data.work.s, 0);
        sema_init(&mdev->meta.work.s, 0);
        mutex_init(&mdev->state_mutex);

        spin_lock_init(&mdev->data.work.q_lock);
        spin_lock_init(&mdev->meta.work.q_lock);

        spin_lock_init(&mdev->al_lock);
        spin_lock_init(&mdev->req_lock);
        spin_lock_init(&mdev->peer_seq_lock);
        spin_lock_init(&mdev->epoch_lock);

        INIT_LIST_HEAD(&mdev->active_ee);
        INIT_LIST_HEAD(&mdev->sync_ee);
        INIT_LIST_HEAD(&mdev->done_ee);
        INIT_LIST_HEAD(&mdev->read_ee);
        INIT_LIST_HEAD(&mdev->net_ee);
        INIT_LIST_HEAD(&mdev->resync_reads);
        INIT_LIST_HEAD(&mdev->data.work.q);
        INIT_LIST_HEAD(&mdev->meta.work.q);
        INIT_LIST_HEAD(&mdev->resync_work.list);
        INIT_LIST_HEAD(&mdev->unplug_work.list);
        INIT_LIST_HEAD(&mdev->go_diskless.list);
        INIT_LIST_HEAD(&mdev->md_sync_work.list);
        INIT_LIST_HEAD(&mdev->start_resync_work.list);
        INIT_LIST_HEAD(&mdev->bm_io_work.w.list);

        mdev->resync_work.cb  = w_resync_timer;
        mdev->unplug_work.cb  = w_send_write_hint;
        mdev->go_diskless.cb  = w_go_diskless;
        mdev->md_sync_work.cb = w_md_sync;
        mdev->bm_io_work.w.cb = w_bitmap_io;
        mdev->start_resync_work.cb = w_start_resync;
        init_timer(&mdev->resync_timer);
        init_timer(&mdev->md_sync_timer);
        init_timer(&mdev->start_resync_timer);
        mdev->resync_timer.function = resync_timer_fn;
        mdev->resync_timer.data = (unsigned long) mdev;
        mdev->md_sync_timer.function = md_sync_timer_fn;
        mdev->md_sync_timer.data = (unsigned long) mdev;
        mdev->start_resync_timer.function = start_resync_timer_fn;
        mdev->start_resync_timer.data = (unsigned long) mdev;

        init_waitqueue_head(&mdev->misc_wait);
        init_waitqueue_head(&mdev->state_wait);
        init_waitqueue_head(&mdev->net_cnt_wait);
        init_waitqueue_head(&mdev->ee_wait);
        init_waitqueue_head(&mdev->al_wait);
        init_waitqueue_head(&mdev->seq_wait);

        drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
        drbd_thread_init(mdev, &mdev->worker, drbd_worker);
        drbd_thread_init(mdev, &mdev->asender, drbd_asender);

        mdev->agreed_pro_version = PRO_VERSION_MAX;
        mdev->write_ordering = WO_bdev_flush;
        mdev->resync_wenr = LC_FREE;
}
void drbd_mdev_cleanup(struct drbd_conf *mdev)
{
        int i;
        if (mdev->receiver.t_state != None)
                dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
                                mdev->receiver.t_state);

        /* no need to lock it, I'm the only thread alive */
        if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
                dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
        mdev->al_writ_cnt  =
        mdev->bm_writ_cnt  =
        mdev->read_cnt     =
        mdev->recv_cnt     =
        mdev->send_cnt     =
        mdev->writ_cnt     =
        mdev->p_size       =
        mdev->rs_start     =
        mdev->rs_total     =
        mdev->rs_failed    = 0;
        mdev->rs_last_events = 0;
        mdev->rs_last_sect_ev = 0;
        for (i = 0; i < DRBD_SYNC_MARKS; i++) {
                mdev->rs_mark_left[i] = 0;
                mdev->rs_mark_time[i] = 0;
        }
        D_ASSERT(mdev->net_conf == NULL);

        drbd_set_my_capacity(mdev, 0);
        if (mdev->bitmap) {
                /* maybe never allocated. */
                drbd_bm_resize(mdev, 0, 1);
                drbd_bm_cleanup(mdev);
        }

        drbd_free_resources(mdev);
        clear_bit(AL_SUSPENDED, &mdev->flags);

        /*
         * currently we drbd_init_ee only on module load, so
         * we may do drbd_release_ee only on module unload!
         */
        D_ASSERT(list_empty(&mdev->active_ee));
        D_ASSERT(list_empty(&mdev->sync_ee));
        D_ASSERT(list_empty(&mdev->done_ee));
        D_ASSERT(list_empty(&mdev->read_ee));
        D_ASSERT(list_empty(&mdev->net_ee));
        D_ASSERT(list_empty(&mdev->resync_reads));
        D_ASSERT(list_empty(&mdev->data.work.q));
        D_ASSERT(list_empty(&mdev->meta.work.q));
        D_ASSERT(list_empty(&mdev->resync_work.list));
        D_ASSERT(list_empty(&mdev->unplug_work.list));
        D_ASSERT(list_empty(&mdev->go_diskless.list));

        drbd_set_defaults(mdev);
}
static void drbd_destroy_mempools(void)
{
        struct page *page;

        while (drbd_pp_pool) {
                page = drbd_pp_pool;
                drbd_pp_pool = (struct page *)page_private(page);
                __free_page(page);
                drbd_pp_vacant--;
        }

        /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */

        if (drbd_ee_mempool)
                mempool_destroy(drbd_ee_mempool);
        if (drbd_request_mempool)
                mempool_destroy(drbd_request_mempool);
        if (drbd_ee_cache)
                kmem_cache_destroy(drbd_ee_cache);
        if (drbd_request_cache)
                kmem_cache_destroy(drbd_request_cache);
        if (drbd_bm_ext_cache)
                kmem_cache_destroy(drbd_bm_ext_cache);
        if (drbd_al_ext_cache)
                kmem_cache_destroy(drbd_al_ext_cache);

        drbd_ee_mempool      = NULL;
        drbd_request_mempool = NULL;
        drbd_ee_cache        = NULL;
        drbd_request_cache   = NULL;
        drbd_bm_ext_cache    = NULL;
        drbd_al_ext_cache    = NULL;
}
static int drbd_create_mempools(void)
{
        struct page *page;
        const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
        int i;

        /* prepare our caches and mempools */
        drbd_request_mempool = NULL;
        drbd_ee_cache        = NULL;
        drbd_request_cache   = NULL;
        drbd_bm_ext_cache    = NULL;
        drbd_al_ext_cache    = NULL;
        drbd_pp_pool         = NULL;

        /* caches */
        drbd_request_cache = kmem_cache_create(
                "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
        if (drbd_request_cache == NULL)
                goto Enomem;

        drbd_ee_cache = kmem_cache_create(
                "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
        if (drbd_ee_cache == NULL)
                goto Enomem;

        drbd_bm_ext_cache = kmem_cache_create(
                "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
        if (drbd_bm_ext_cache == NULL)
                goto Enomem;

        drbd_al_ext_cache = kmem_cache_create(
                "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
        if (drbd_al_ext_cache == NULL)
                goto Enomem;

        /* mempools */
        drbd_request_mempool = mempool_create(number,
                mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
        if (drbd_request_mempool == NULL)
                goto Enomem;

        drbd_ee_mempool = mempool_create(number,
                mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
        if (drbd_ee_mempool == NULL)
                goto Enomem;

        /* drbd's page pool */
        spin_lock_init(&drbd_pp_lock);

        for (i = 0; i < number; i++) {
                page = alloc_page(GFP_HIGHUSER);
                if (!page)
                        goto Enomem;
                set_page_private(page, (unsigned long)drbd_pp_pool);
                drbd_pp_pool = page;
        }
        drbd_pp_vacant = number;

        return 0;

Enomem:
        drbd_destroy_mempools(); /* in case we allocated some */
        return -ENOMEM;
}
static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
        void *unused)
{
        /* just so we have it.  you never know what interesting things we
         * might want to do here some day...
         */

        return NOTIFY_DONE;
}

static struct notifier_block drbd_notifier = {
        .notifier_call = drbd_notify_sys,
};
static void drbd_release_ee_lists(struct drbd_conf *mdev)
{
        int rr;

        rr = drbd_release_ee(mdev, &mdev->active_ee);
        if (rr)
                dev_err(DEV, "%d EEs in active list found!\n", rr);

        rr = drbd_release_ee(mdev, &mdev->sync_ee);
        if (rr)
                dev_err(DEV, "%d EEs in sync list found!\n", rr);

        rr = drbd_release_ee(mdev, &mdev->read_ee);
        if (rr)
                dev_err(DEV, "%d EEs in read list found!\n", rr);

        rr = drbd_release_ee(mdev, &mdev->done_ee);
        if (rr)
                dev_err(DEV, "%d EEs in done list found!\n", rr);

        rr = drbd_release_ee(mdev, &mdev->net_ee);
        if (rr)
                dev_err(DEV, "%d EEs in net list found!\n", rr);
}
/* caution. no locking.
 * currently only used from module cleanup code. */
static void drbd_delete_device(unsigned int minor)
{
        struct drbd_conf *mdev = minor_to_mdev(minor);

        if (!mdev)
                return;

        /* paranoia asserts */
        if (mdev->open_cnt != 0)
                dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
                                __FILE__, __LINE__);

        ERR_IF (!list_empty(&mdev->data.work.q)) {
                struct list_head *lp;
                list_for_each(lp, &mdev->data.work.q) {
                        dev_err(DEV, "lp = %p\n", lp);
                }
        };
        /* end paranoia asserts */

        del_gendisk(mdev->vdisk);

        /* cleanup stuff that may have been allocated during
         * device (re-)configuration or state changes */

        if (mdev->this_bdev)
                bdput(mdev->this_bdev);

        drbd_free_resources(mdev);

        drbd_release_ee_lists(mdev);

        /* should be freed on disconnect? */
        kfree(mdev->ee_hash);
        /*
        mdev->ee_hash_s = 0;
        mdev->ee_hash = NULL;
        */

        lc_destroy(mdev->act_log);
        lc_destroy(mdev->resync);

        kfree(mdev->p_uuid);
        /* mdev->p_uuid = NULL; */

        kfree(mdev->int_dig_out);
        kfree(mdev->int_dig_in);
        kfree(mdev->int_dig_vv);

        /* cleanup the rest that has been
         * allocated from drbd_new_device
         * and actually free the mdev itself */
        drbd_free_mdev(mdev);
}
static void drbd_cleanup(void)
{
        unsigned int i;

        unregister_reboot_notifier(&drbd_notifier);

        /* first remove proc,
         * drbdsetup uses its presence to detect
         * whether DRBD is loaded.
         * If we would get stuck in proc removal,
         * but have netlink already deregistered,
         * some drbdsetup commands may wait forever
         * for an answer.
         */
        if (drbd_proc)
                remove_proc_entry("drbd", NULL);

        drbd_nl_cleanup();

        if (minor_table) {
                i = minor_count;
                while (i--)
                        drbd_delete_device(i);
                drbd_destroy_mempools();
        }

        kfree(minor_table);

        unregister_blkdev(DRBD_MAJOR, "drbd");

        printk(KERN_INFO "drbd: module cleanup done.\n");
}
/**
 * drbd_congested() - Callback for pdflush
 * @congested_data:	User data
 * @bdi_bits:		Bits pdflush is currently interested in
 *
 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
        struct drbd_conf *mdev = congested_data;
        struct request_queue *q;
        char reason = '-';
        int r = 0;

        if (!may_inc_ap_bio(mdev)) {
                /* DRBD has frozen IO */
                r = bdi_bits;
                reason = 'd';
                goto out;
        }

        if (get_ldev(mdev)) {
                q = bdev_get_queue(mdev->ldev->backing_bdev);
                r = bdi_congested(&q->backing_dev_info, bdi_bits);
                put_ldev(mdev);
                if (r)
                        reason = 'b';
        }

        if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
                r |= (1 << BDI_async_congested);
                reason = reason == 'b' ? 'a' : 'n';
        }

out:
        mdev->congestion_reason = reason;
        return r;
}
struct drbd_conf *drbd_new_device(unsigned int minor)
{
        struct drbd_conf *mdev;
        struct gendisk *disk;
        struct request_queue *q;

        /* GFP_KERNEL, we are outside of all write-out paths */
        mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
        if (!mdev)
                return NULL;
        if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
                goto out_no_cpumask;

        mdev->minor = minor;

        drbd_init_set_defaults(mdev);

        q = blk_alloc_queue(GFP_KERNEL);
        if (!q)
                goto out_no_q;
        mdev->rq_queue = q;
        q->queuedata = mdev;

        disk = alloc_disk(1);
        if (!disk)
                goto out_no_disk;
        mdev->vdisk = disk;

        set_disk_ro(disk, true);

        disk->queue = q;
        disk->major = DRBD_MAJOR;
        disk->first_minor = minor;
        disk->fops = &drbd_ops;
        sprintf(disk->disk_name, "drbd%d", minor);
        disk->private_data = mdev;

        mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
        /* we have no partitions. we contain only ourselves. */
        mdev->this_bdev->bd_contains = mdev->this_bdev;

        q->backing_dev_info.congested_fn = drbd_congested;
        q->backing_dev_info.congested_data = mdev;

        blk_queue_make_request(q, drbd_make_request);
        blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9);
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
        blk_queue_merge_bvec(q, drbd_merge_bvec);
        q->queue_lock = &mdev->req_lock;

        mdev->md_io_page = alloc_page(GFP_KERNEL);
        if (!mdev->md_io_page)
                goto out_no_io_page;

        if (drbd_bm_init(mdev))
                goto out_no_bitmap;
        /* no need to lock access, we are still initializing this minor device. */
        if (!tl_init(mdev))
                goto out_no_tl;

        mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
        if (!mdev->app_reads_hash)
                goto out_no_app_reads;

        mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
        if (!mdev->current_epoch)
                goto out_no_epoch;

        INIT_LIST_HEAD(&mdev->current_epoch->list);
        mdev->epochs = 1;

        return mdev;

/* out_whatever_else:
        kfree(mdev->current_epoch); */
out_no_epoch:
        kfree(mdev->app_reads_hash);
out_no_app_reads:
        tl_cleanup(mdev);
out_no_tl:
        drbd_bm_cleanup(mdev);
out_no_bitmap:
        __free_page(mdev->md_io_page);
out_no_io_page:
        put_disk(disk);
out_no_disk:
        blk_cleanup_queue(q);
out_no_q:
        free_cpumask_var(mdev->cpu_mask);
out_no_cpumask:
        kfree(mdev);
        return NULL;
}
/* counterpart of drbd_new_device.
 * last part of drbd_delete_device. */
void drbd_free_mdev(struct drbd_conf *mdev)
{
        kfree(mdev->current_epoch);
        kfree(mdev->app_reads_hash);
        tl_cleanup(mdev);
        if (mdev->bitmap) /* should no longer be there. */
                drbd_bm_cleanup(mdev);
        __free_page(mdev->md_io_page);
        put_disk(mdev->vdisk);
        blk_cleanup_queue(mdev->rq_queue);
        free_cpumask_var(mdev->cpu_mask);
        drbd_free_tl_hash(mdev);
        kfree(mdev);
}
int __init drbd_init(void)
{
        int err;

        if (sizeof(struct p_handshake) != 80) {
                printk(KERN_ERR
                       "drbd: never change the size or layout "
                       "of the HandShake packet.\n");
                return -EINVAL;
        }

        if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
                printk(KERN_ERR
                        "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
                return -EINVAL;
#else
                minor_count = 8;
#endif
        }

        err = drbd_nl_init();
        if (err)
                return err;

        err = register_blkdev(DRBD_MAJOR, "drbd");
        if (err) {
                printk(KERN_ERR
                       "drbd: unable to register block device major %d\n",
                       DRBD_MAJOR);
                return err;
        }

        register_reboot_notifier(&drbd_notifier);

        /*
         * allocate all necessary structs
         */
        err = -ENOMEM;

        init_waitqueue_head(&drbd_pp_wait);

        drbd_proc = NULL; /* play safe for drbd_cleanup */
        minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
                                GFP_KERNEL);
        if (!minor_table)
                goto Enomem;

        err = drbd_create_mempools();
        if (err)
                goto Enomem;

        drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO, NULL, &drbd_proc_fops, NULL);
        if (!drbd_proc) {
                printk(KERN_ERR "drbd: unable to register proc file\n");
                goto Enomem;
        }

        rwlock_init(&global_state_lock);

        printk(KERN_INFO "drbd: initialized. "
               "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
               API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
        printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
        printk(KERN_INFO "drbd: registered as block device major %d\n",
                DRBD_MAJOR);
        printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);

        return 0; /* Success! */

Enomem:
        drbd_cleanup();
        if (err == -ENOMEM)
                /* currently always the case */
                printk(KERN_ERR "drbd: ran out of memory\n");
        else
                printk(KERN_ERR "drbd: initialization failure\n");

        return err;
}
void drbd_free_bc(struct drbd_backing_dev *ldev)
{
        if (ldev == NULL)
                return;

        blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
        blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

        kfree(ldev);
}
void drbd_free_sock(struct drbd_conf *mdev)
{
        if (mdev->data.socket) {
                mutex_lock(&mdev->data.mutex);
                kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
                sock_release(mdev->data.socket);
                mdev->data.socket = NULL;
                mutex_unlock(&mdev->data.mutex);
        }
        if (mdev->meta.socket) {
                mutex_lock(&mdev->meta.mutex);
                kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
                sock_release(mdev->meta.socket);
                mdev->meta.socket = NULL;
                mutex_unlock(&mdev->meta.mutex);
        }
}
void drbd_free_resources(struct drbd_conf *mdev)
{
        crypto_free_hash(mdev->csums_tfm);
        mdev->csums_tfm = NULL;
        crypto_free_hash(mdev->verify_tfm);
        mdev->verify_tfm = NULL;
        crypto_free_hash(mdev->cram_hmac_tfm);
        mdev->cram_hmac_tfm = NULL;
        crypto_free_hash(mdev->integrity_w_tfm);
        mdev->integrity_w_tfm = NULL;
        crypto_free_hash(mdev->integrity_r_tfm);
        mdev->integrity_r_tfm = NULL;

        drbd_free_sock(mdev);

        __no_warn(local,
                  drbd_free_bc(mdev->ldev);
                  mdev->ldev = NULL;);
}
/* meta data management */

struct meta_data_on_disk {
        u64 la_size;           /* last agreed size. */
        u64 uuid[UI_SIZE];     /* UUIDs. */
        u64 device_uuid;
        u64 reserved_u64_1;
        u32 flags;             /* MDF */
        u32 magic;
        u32 md_size_sect;
        u32 al_offset;         /* offset to this block */
        u32 al_nr_extents;     /* important for restoring the AL */
              /* `-- act_log->nr_elements <-- sync_conf.al_extents */
        u32 bm_offset;         /* offset to the bitmap, from here */
        u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
        u32 reserved_u32[4];

} __packed;
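/* Note on the on-disk format (illustrative): drbd_md_sync() below zeroes a
 * full 512 byte buffer before filling in this struct, so everything past
 * the declared fields reads back as zero, and every multi-byte field is
 * stored big endian (cpu_to_be32/cpu_to_be64).  That keeps the meta data
 * block portable between hosts of different endianness. */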
/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @mdev:	DRBD device.
 */
void drbd_md_sync(struct drbd_conf *mdev)
{
        struct meta_data_on_disk *buffer;
        sector_t sector;
        int i;

        del_timer(&mdev->md_sync_timer);
        /* timer may be rearmed by drbd_md_mark_dirty() now. */
        if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
                return;

        /* We use here D_FAILED and not D_ATTACHING because we try to write
         * metadata even if we detach due to a disk failure! */
        if (!get_ldev_if_state(mdev, D_FAILED))
                return;

        mutex_lock(&mdev->md_io_mutex);
        buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
        memset(buffer, 0, 512);

        buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
        for (i = UI_CURRENT; i < UI_SIZE; i++)
                buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
        buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
        buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);

        buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
        buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
        buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
        buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
        buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);

        buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);

        D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
        sector = mdev->ldev->md.md_offset;

        if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
                /* this was a try anyways ... */
                dev_err(DEV, "meta data update failed!\n");
                drbd_chk_io_error(mdev, 1, true);
        }

        /* Update mdev->ldev->md.la_size_sect,
         * since we updated it on metadata. */
        mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);

        mutex_unlock(&mdev->md_io_mutex);
        put_ldev(mdev);
}
/**
 * drbd_md_read() - Reads in the meta data super block
 * @mdev:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
 * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
 */
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
        struct meta_data_on_disk *buffer;
        int i, rv = NO_ERROR;

        if (!get_ldev_if_state(mdev, D_ATTACHING))
                return ERR_IO_MD_DISK;

        mutex_lock(&mdev->md_io_mutex);
        buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);

        if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
                /* NOTE: can't do normal error processing here as this is
                   called BEFORE disk is attached */
                dev_err(DEV, "Error while reading metadata.\n");
                rv = ERR_IO_MD_DISK;
                goto err;
        }

        if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
                dev_err(DEV, "Error while reading metadata, magic not found.\n");
                rv = ERR_MD_INVALID;
                goto err;
        }
        if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
                dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
                    be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
                rv = ERR_MD_INVALID;
                goto err;
        }
        if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
                dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
                    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
                rv = ERR_MD_INVALID;
                goto err;
        }
        if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
                dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
                    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
                rv = ERR_MD_INVALID;
                goto err;
        }

        if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
                dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
                    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
                rv = ERR_MD_INVALID;
                goto err;
        }

        bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
        for (i = UI_CURRENT; i < UI_SIZE; i++)
                bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
        bdev->md.flags = be32_to_cpu(buffer->flags);
        mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
        bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

        if (mdev->sync_conf.al_extents < 7)
                mdev->sync_conf.al_extents = 127;

 err:
        mutex_unlock(&mdev->md_io_mutex);
        put_ldev(mdev);

        return rv;
}
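/* Usage sketch: drbd_md_read() is the attach-time counterpart of
 * drbd_md_sync().  A superblock whose al_offset, bm_offset or md_size_sect
 * does not match what was computed from the configured meta data location
 * is rejected with ERR_MD_INVALID rather than silently trusted. */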
static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index)
{
        static char *uuid_str[UI_EXTENDED_SIZE] = {
                [UI_CURRENT] = "CURRENT",
                [UI_BITMAP] = "BITMAP",
                [UI_HISTORY_START] = "HISTORY_START",
                [UI_HISTORY_END] = "HISTORY_END",
                [UI_SIZE] = "SIZE",
                [UI_FLAGS] = "FLAGS",
        };

        if (index >= UI_EXTENDED_SIZE) {
                dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n");
                return;
        }

        dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n",
                 uuid_str[index],
                 (unsigned long long)mdev->ldev->md.uuid[index]);
}
/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @mdev:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures that within five seconds you have to call drbd_md_sync().
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
        if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
                mod_timer(&mdev->md_sync_timer, jiffies + HZ);
                mdev->last_md_mark_dirty.line = line;
                mdev->last_md_mark_dirty.func = func;
        }
}
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
        if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
                mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif
static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
        int i;

        for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
                mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
                debug_drbd_uuid(mdev, i+1);
        }
}
void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
        if (idx == UI_CURRENT) {
                if (mdev->state.role == R_PRIMARY)
                        val |= 1;
                else
                        val &= ~((u64)1);

                drbd_set_ed_uuid(mdev, val);
        }

        mdev->ldev->md.uuid[idx] = val;
        debug_drbd_uuid(mdev, idx);
        drbd_md_mark_dirty(mdev);
}
void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
        if (mdev->ldev->md.uuid[idx]) {
                drbd_uuid_move_history(mdev);
                mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
                debug_drbd_uuid(mdev, UI_HISTORY_START);
        }
        _drbd_uuid_set(mdev, idx, val);
}
/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @mdev:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
        u64 val;

        dev_info(DEV, "Creating new current UUID\n");
        D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
        mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
        debug_drbd_uuid(mdev, UI_BITMAP);

        get_random_bytes(&val, sizeof(u64));
        _drbd_uuid_set(mdev, UI_CURRENT, val);
        /* get it to stable storage _now_ */
        drbd_md_sync(mdev);
}
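/* Worked example of the rotation above (illustrative): with
 * uuid[UI_CURRENT] = A and an empty bitmap slot, this yields
 *   uuid[UI_BITMAP]  = A
 *   uuid[UI_CURRENT] = fresh random value
 * so a peer that still carries A as its current UUID can be recognized on
 * the next connect, and only the blocks marked in the bitmap since the
 * rotation need to be resynced. */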
void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
        if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
                return;

        if (val == 0) {
                drbd_uuid_move_history(mdev);
                mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
                mdev->ldev->md.uuid[UI_BITMAP] = 0;
                debug_drbd_uuid(mdev, UI_HISTORY_START);
                debug_drbd_uuid(mdev, UI_BITMAP);
        } else {
                if (mdev->ldev->md.uuid[UI_BITMAP])
                        dev_warn(DEV, "bm UUID already set");

                mdev->ldev->md.uuid[UI_BITMAP] = val;
                mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);

                debug_drbd_uuid(mdev, UI_BITMAP);
        }
        drbd_md_mark_dirty(mdev);
}
/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
        int rv = -EIO;

        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                drbd_md_set_flag(mdev, MDF_FULL_SYNC);
                drbd_md_sync(mdev);
                drbd_bm_set_all(mdev);

                rv = drbd_bm_write(mdev);

                if (!rv) {
                        drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
                        drbd_md_sync(mdev);
                }

                put_ldev(mdev);
        }

        return rv;
}
/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
        int rv = -EIO;

        drbd_resume_al(mdev);
        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                drbd_bm_clear_all(mdev);
                rv = drbd_bm_write(mdev);
                put_ldev(mdev);
        }

        return rv;
}
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
        struct bm_io_work *work = container_of(w, struct bm_io_work, w);
        int rv = -EIO;

        D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

        if (get_ldev(mdev)) {
                drbd_bm_lock(mdev, work->why);
                rv = work->io_fn(mdev);
                drbd_bm_unlock(mdev);
                put_ldev(mdev);
        }

        clear_bit(BITMAP_IO, &mdev->flags);
        smp_mb__after_clear_bit();
        wake_up(&mdev->misc_wait);

        if (work->done)
                work->done(mdev, rv);

        clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
        work->why = NULL;

        return 1;
}
void drbd_ldev_destroy(struct drbd_conf *mdev)
{
        lc_destroy(mdev->resync);
        mdev->resync = NULL;
        lc_destroy(mdev->act_log);
        mdev->act_log = NULL;
        __no_warn(local,
                  drbd_free_bc(mdev->ldev);
                  mdev->ldev = NULL;);

        if (mdev->md_io_tmpp) {
                __free_page(mdev->md_io_tmpp);
                mdev->md_io_tmpp = NULL;
        }
        clear_bit(GO_DISKLESS, &mdev->flags);
}
static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
        D_ASSERT(mdev->state.disk == D_FAILED);
        /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
         * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
         * the protected members anymore, though, so once put_ldev reaches zero
         * again, it will be safe to free them. */
        drbd_force_state(mdev, NS(disk, D_DISKLESS));
        return 1;
}
void drbd_go_diskless(struct drbd_conf *mdev)
{
        D_ASSERT(mdev->state.disk == D_FAILED);
        if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
                drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
}
/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 *
 * While IO on the bitmap happens we freeze application IO thus we ensure
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
                          int (*io_fn)(struct drbd_conf *),
                          void (*done)(struct drbd_conf *, int),
                          char *why)
{
        D_ASSERT(current == mdev->worker.task);

        D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
        D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
        D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
        if (mdev->bm_io_work.why)
                dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
                        why, mdev->bm_io_work.why);

        mdev->bm_io_work.io_fn = io_fn;
        mdev->bm_io_work.done = done;
        mdev->bm_io_work.why = why;

        spin_lock_irq(&mdev->req_lock);
        set_bit(BITMAP_IO, &mdev->flags);
        if (atomic_read(&mdev->ap_bio_cnt) == 0) {
                if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
                        drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
        }
        spin_unlock_irq(&mdev->req_lock);
}
/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
{
        int rv;

        D_ASSERT(current != mdev->worker.task);

        drbd_suspend_io(mdev);

        drbd_bm_lock(mdev, why);
        rv = io_fn(mdev);
        drbd_bm_unlock(mdev);

        drbd_resume_io(mdev);

        return rv;
}
void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
        if ((mdev->ldev->md.flags & flag) != flag) {
                drbd_md_mark_dirty(mdev);
                mdev->ldev->md.flags |= flag;
        }
}

void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
        if ((mdev->ldev->md.flags & flag) != 0) {
                drbd_md_mark_dirty(mdev);
                mdev->ldev->md.flags &= ~flag;
        }
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
        return (bdev->md.flags & flag) != 0;
}
static void md_sync_timer_fn(unsigned long data)
{
        struct drbd_conf *mdev = (struct drbd_conf *) data;

        drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
}
static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
        dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
        dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
                mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
        drbd_md_sync(mdev);
        return 1;
}
#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
        unsigned long state;
        unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701  /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
        unsigned long refresh;

        if (!rsp->count--) {
                get_random_bytes(&refresh, sizeof(refresh));
                rsp->state += refresh;
                rsp->count = FAULT_RANDOM_REFRESH;
        }
        rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
        return swahw32(rsp->state);
}
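/* Illustrative recurrence of the generator above, modulo the word size:
 *   state' = state * 39916801 + 479001701
 * e.g. state = 1 yields 518918502 in the first step.  swahw32() then swaps
 * the 16 bit half-words, so the statistically weaker low bits of the LCG
 * do not dominate the "% 100" reduction in _drbd_insert_fault() below. */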
static char *
_drbd_fault_str(unsigned int type) {
        static char *_faults[] = {
                [DRBD_FAULT_MD_WR] = "Meta-data write",
                [DRBD_FAULT_MD_RD] = "Meta-data read",
                [DRBD_FAULT_RS_WR] = "Resync write",
                [DRBD_FAULT_RS_RD] = "Resync read",
                [DRBD_FAULT_DT_WR] = "Data write",
                [DRBD_FAULT_DT_RD] = "Data read",
                [DRBD_FAULT_DT_RA] = "Data read ahead",
                [DRBD_FAULT_BM_ALLOC] = "BM allocation",
                [DRBD_FAULT_AL_EE] = "EE allocation",
                [DRBD_FAULT_RECEIVE] = "receive data corruption",
        };

        return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}
unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
{
        static struct fault_random_state rrs = {0, 0};

        unsigned int ret = (
                (fault_devs == 0 ||
                        ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
                (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

        if (ret) {
                fault_count++;

                if (__ratelimit(&drbd_ratelimit_state))
                        dev_warn(DEV, "***Simulating %s failure\n",
                                _drbd_fault_str(type));
        }

        return ret;
}
#endif
const char *drbd_buildtag(void)
{
        /* DRBD built from external sources has here a reference to the
           git hash of the source code. */

        static char buildtag[38] = "\0uilt-in";

        if (buildtag[0] == 0) {
#ifdef CONFIG_MODULES
                if (THIS_MODULE != NULL)
                        sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
                else
#endif
                        buildtag[0] = 'b';
        }

        return buildtag;
}
module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);