drbd: queue bitmap writeout more intelligently
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / block / drbd / drbd_main.c
1/*
2 drbd.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
12
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27 */
28
29#include <linux/module.h>
30#include <linux/drbd.h>
31#include <asm/uaccess.h>
32#include <asm/types.h>
33#include <net/sock.h>
34#include <linux/ctype.h>
35#include <linux/mutex.h>
36#include <linux/fs.h>
37#include <linux/file.h>
38#include <linux/proc_fs.h>
39#include <linux/init.h>
40#include <linux/mm.h>
41#include <linux/memcontrol.h>
42#include <linux/mm_inline.h>
43#include <linux/slab.h>
44#include <linux/random.h>
45#include <linux/reboot.h>
46#include <linux/notifier.h>
47#include <linux/kthread.h>
48
49#define __KERNEL_SYSCALLS__
50#include <linux/unistd.h>
51#include <linux/vmalloc.h>
52
53#include <linux/drbd_limits.h>
54#include "drbd_int.h"
55#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57#include "drbd_vli.h"
58
59struct after_state_chg_work {
60 struct drbd_work w;
61 union drbd_state os;
62 union drbd_state ns;
63 enum chg_state_flags flags;
64 struct completion *done;
65};
66
67static DEFINE_MUTEX(drbd_main_mutex);
68int drbdd_init(struct drbd_thread *);
69int drbd_worker(struct drbd_thread *);
70int drbd_asender(struct drbd_thread *);
71
72int drbd_init(void);
73static int drbd_open(struct block_device *bdev, fmode_t mode);
74static int drbd_release(struct gendisk *gd, fmode_t mode);
75static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
76static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
77 union drbd_state ns, enum chg_state_flags flags);
78static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79static void md_sync_timer_fn(unsigned long data);
80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82
83MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84 "Lars Ellenberg <lars@linbit.com>");
85MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
86MODULE_VERSION(REL_VERSION);
87MODULE_LICENSE("GPL");
88MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
89 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
90MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
91
92#include <linux/moduleparam.h>
93/* allow_open_on_secondary */
94MODULE_PARM_DESC(allow_oos, "DONT USE!");
95/* thanks to these macros, if compiled into the kernel (not-module),
96 * this becomes the boot parameter drbd.minor_count */
97module_param(minor_count, uint, 0444);
98module_param(disable_sendpage, bool, 0644);
99module_param(allow_oos, bool, 0);
100module_param(cn_idx, uint, 0444);
101module_param(proc_details, int, 0644);
102
103#ifdef CONFIG_DRBD_FAULT_INJECTION
104int enable_faults;
105int fault_rate;
106static int fault_count;
107int fault_devs;
108/* bitmap of enabled faults */
109module_param(enable_faults, int, 0664);
110/* fault rate % value - applies to all enabled faults */
111module_param(fault_rate, int, 0664);
112/* count of faults inserted */
113module_param(fault_count, int, 0664);
114/* bitmap of devices to insert faults on */
115module_param(fault_devs, int, 0644);
116#endif
117
118/* module parameter, defined */
119unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
120int disable_sendpage;
121int allow_oos;
122unsigned int cn_idx = CN_IDX_DRBD;
123int proc_details; /* Detail level in proc drbd*/
124
125/* Module parameter for setting the user mode helper program
126 * to run. Default is /sbin/drbdadm */
127char usermode_helper[80] = "/sbin/drbdadm";
128
129module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
130
131/* in 2.6.x, our device mapping and config info contains our virtual gendisks
132 * as member "struct gendisk *vdisk;"
133 */
134struct drbd_conf **minor_table;
135
136struct kmem_cache *drbd_request_cache;
137struct kmem_cache *drbd_ee_cache; /* epoch entries */
138struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
139struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
140mempool_t *drbd_request_mempool;
141mempool_t *drbd_ee_mempool;
142
143/* I do not use a standard mempool, because:
144 1) I want to hand out the pre-allocated objects first.
145 2) I want to be able to interrupt sleeping allocation with a signal.
146 Note: This is a singly linked list, the next pointer is the private
147 member of struct page.
148 */
149struct page *drbd_pp_pool;
150spinlock_t drbd_pp_lock;
151int drbd_pp_vacant;
152wait_queue_head_t drbd_pp_wait;
153
154DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
155
156static const struct block_device_operations drbd_ops = {
157 .owner = THIS_MODULE,
158 .open = drbd_open,
159 .release = drbd_release,
160};
161
162#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
163
164#ifdef __CHECKER__
165/* When checking with sparse, and this is an inline function, sparse will
166 give tons of false positives. When this is a real function, sparse works.
167 */
168int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
169{
170 int io_allowed;
171
172 atomic_inc(&mdev->local_cnt);
173 io_allowed = (mdev->state.disk >= mins);
174 if (!io_allowed) {
175 if (atomic_dec_and_test(&mdev->local_cnt))
176 wake_up(&mdev->misc_wait);
177 }
178 return io_allowed;
179}
180
181#endif
182
183/**
184 * DOC: The transfer log
185 *
186 * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
187 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
188 * of the list. There is always at least one &struct drbd_tl_epoch object.
189 *
190 * Each &struct drbd_tl_epoch has a circular double linked list of requests
191 * attached.
192 */
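/* Editor's note: illustrative sketch only, not part of the original source.
 * Given the layout described in the DOC comment above, walking the transfer
 * log from oldest to newest epoch (under req_lock, as _tl_restart() does
 * further down) might look like this:
 *
 *	struct drbd_tl_epoch *b;
 *	for (b = mdev->oldest_tle; b != NULL; b = b->next)
 *		dev_info(DEV, "epoch #%u holds %u writes\n",
 *			 b->br_number, b->n_writes);
 */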
193static int tl_init(struct drbd_conf *mdev)
194{
195 struct drbd_tl_epoch *b;
196
197 /* during device minor initialization, we may well use GFP_KERNEL */
198 b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
199 if (!b)
200 return 0;
201 INIT_LIST_HEAD(&b->requests);
202 INIT_LIST_HEAD(&b->w.list);
203 b->next = NULL;
204 b->br_number = 4711;
205 b->n_writes = 0;
206 b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
207
208 mdev->oldest_tle = b;
209 mdev->newest_tle = b;
210 INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
211
212 mdev->tl_hash = NULL;
213 mdev->tl_hash_s = 0;
214
215 return 1;
216}
217
218static void tl_cleanup(struct drbd_conf *mdev)
219{
220 D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
221 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
222 kfree(mdev->oldest_tle);
223 mdev->oldest_tle = NULL;
224 kfree(mdev->unused_spare_tle);
225 mdev->unused_spare_tle = NULL;
226 kfree(mdev->tl_hash);
227 mdev->tl_hash = NULL;
228 mdev->tl_hash_s = 0;
229}
230
231/**
232 * _tl_add_barrier() - Adds a barrier to the transfer log
233 * @mdev: DRBD device.
234 * @new: Barrier to be added before the current head of the TL.
235 *
236 * The caller must hold the req_lock.
237 */
238void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
239{
240 struct drbd_tl_epoch *newest_before;
241
242 INIT_LIST_HEAD(&new->requests);
243 INIT_LIST_HEAD(&new->w.list);
244 new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
245 new->next = NULL;
246 new->n_writes = 0;
247
248 newest_before = mdev->newest_tle;
249 /* never send a barrier number == 0, because that is special-cased
250 * when using TCQ for our write ordering code */
251 new->br_number = (newest_before->br_number+1) ?: 1;
252 if (mdev->newest_tle != new) {
253 mdev->newest_tle->next = new;
254 mdev->newest_tle = new;
255 }
256}
257
258/**
259 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
260 * @mdev: DRBD device.
261 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
262 * @set_size: Expected number of requests before that barrier.
263 *
264 * In case the passed barrier_nr or set_size does not match the oldest
265 * &struct drbd_tl_epoch objects this function will cause a termination
266 * of the connection.
267 */
268void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
269 unsigned int set_size)
270{
271 struct drbd_tl_epoch *b, *nob; /* next old barrier */
272 struct list_head *le, *tle;
273 struct drbd_request *r;
274
275 spin_lock_irq(&mdev->req_lock);
276
277 b = mdev->oldest_tle;
278
279 /* first some paranoia code */
280 if (b == NULL) {
281 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
282 barrier_nr);
283 goto bail;
284 }
285 if (b->br_number != barrier_nr) {
286 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
287 barrier_nr, b->br_number);
288 goto bail;
289 }
290 if (b->n_writes != set_size) {
291 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
292 barrier_nr, set_size, b->n_writes);
293 goto bail;
294 }
295
296 /* Clean up list of requests processed during current epoch */
297 list_for_each_safe(le, tle, &b->requests) {
298 r = list_entry(le, struct drbd_request, tl_requests);
299 _req_mod(r, barrier_acked);
300 }
301 /* There could be requests on the list waiting for completion
302 of the write to the local disk. To avoid corruptions of
303 slab's data structures we have to remove the list's head.
304
305 Also there could have been a barrier ack out of sequence, overtaking
306 the write acks - which would be a bug and violating write ordering.
307 To not deadlock in case we lose connection while such requests are
308 still pending, we need some way to find them for the
309 _req_mod(connection_lost_while_pending).
310
311 These have been list_move'd to the out_of_sequence_requests list in
312 _req_mod(, barrier_acked) above.
313 */
314 list_del_init(&b->requests);
315
316 nob = b->next;
317 if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
318 _tl_add_barrier(mdev, b);
319 if (nob)
320 mdev->oldest_tle = nob;
321 /* if nob == NULL b was the only barrier, and becomes the new
322 barrier. Therefore mdev->oldest_tle points already to b */
323 } else {
324 D_ASSERT(nob != NULL);
325 mdev->oldest_tle = nob;
326 kfree(b);
327 }
328
329 spin_unlock_irq(&mdev->req_lock);
330 dec_ap_pending(mdev);
331
332 return;
333
334bail:
335 spin_unlock_irq(&mdev->req_lock);
336 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
337}
338
339
340/* In C_AHEAD mode only out_of_sync packets are sent for requests. Detach
341 * those requests from the newest barrier when changing to another cstate.
342 *
343 * That headless list vanishes when the last request finishes its write or
344 * sends its out_of_sync packet. */
345static void tl_forget(struct drbd_conf *mdev)
346{
347 struct drbd_tl_epoch *b;
348
349 if (test_bit(CREATE_BARRIER, &mdev->flags))
350 return;
351
352 b = mdev->newest_tle;
353 list_del(&b->requests);
354 _tl_add_barrier(mdev, b);
355}
356
357/**
358 * _tl_restart() - Walks the transfer log, and applies an action to all requests
359 * @mdev: DRBD device.
360 * @what: The action/event to perform with all request objects
361 *
362 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
363 * restart_frozen_disk_io.
364 */
365static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
366{
367 struct drbd_tl_epoch *b, *tmp, **pn;
368 struct list_head *le, *tle, carry_reads;
369 struct drbd_request *req;
370 int rv, n_writes, n_reads;
371
372 b = mdev->oldest_tle;
373 pn = &mdev->oldest_tle;
374 while (b) {
375 n_writes = 0;
376 n_reads = 0;
377 INIT_LIST_HEAD(&carry_reads);
378 list_for_each_safe(le, tle, &b->requests) {
379 req = list_entry(le, struct drbd_request, tl_requests);
380 rv = _req_mod(req, what);
381
382 n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
383 n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
384 }
385 tmp = b->next;
386
387 if (n_writes) {
388 if (what == resend) {
389 b->n_writes = n_writes;
390 if (b->w.cb == NULL) {
391 b->w.cb = w_send_barrier;
392 inc_ap_pending(mdev);
393 set_bit(CREATE_BARRIER, &mdev->flags);
394 }
395
396 drbd_queue_work(&mdev->data.work, &b->w);
397 }
398 pn = &b->next;
399 } else {
400 if (n_reads)
401 list_add(&carry_reads, &b->requests);
402 /* there could still be requests on that ring list,
403 * in case local io is still pending */
404 list_del(&b->requests);
405
406 /* dec_ap_pending corresponding to queue_barrier.
407 * the newest barrier may not have been queued yet,
408 * in which case w.cb is still NULL. */
409 if (b->w.cb != NULL)
410 dec_ap_pending(mdev);
411
412 if (b == mdev->newest_tle) {
413 /* recycle, but reinit! */
414 D_ASSERT(tmp == NULL);
415 INIT_LIST_HEAD(&b->requests);
416 list_splice(&carry_reads, &b->requests);
417 INIT_LIST_HEAD(&b->w.list);
418 b->w.cb = NULL;
419 b->br_number = net_random();
420 b->n_writes = 0;
421
422 *pn = b;
423 break;
424 }
425 *pn = tmp;
426 kfree(b);
427 }
428 b = tmp;
429 list_splice(&carry_reads, &b->requests);
430 }
431}
432
433
434/**
435 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
436 * @mdev: DRBD device.
437 *
438 * This is called after the connection to the peer was lost. The storage covered
439 * by the requests on the transfer log gets marked as out of sync. Called from the
440 * receiver thread and the worker thread.
441 */
442void tl_clear(struct drbd_conf *mdev)
443{
444 struct list_head *le, *tle;
445 struct drbd_request *r;
446
447 spin_lock_irq(&mdev->req_lock);
448
449 _tl_restart(mdev, connection_lost_while_pending);
450
451 /* we expect this list to be empty. */
452 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
453
454 /* but just in case, clean it up anyways! */
455 list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
456 r = list_entry(le, struct drbd_request, tl_requests);
457 /* It would be nice to complete outside of spinlock.
458 * But this is easier for now. */
459 _req_mod(r, connection_lost_while_pending);
460 }
461
462 /* ensure bit indicating barrier is required is clear */
463 clear_bit(CREATE_BARRIER, &mdev->flags);
464
465 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
466
467 spin_unlock_irq(&mdev->req_lock);
468}
469
470void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
471{
472 spin_lock_irq(&mdev->req_lock);
473 _tl_restart(mdev, what);
474 spin_unlock_irq(&mdev->req_lock);
475}
476
477/**
478 * cl_wide_st_chg() - true if the state change is a cluster wide one
479 * @mdev: DRBD device.
480 * @os: old (current) state.
481 * @ns: new (wanted) state.
482 */
483static int cl_wide_st_chg(struct drbd_conf *mdev,
484 union drbd_state os, union drbd_state ns)
485{
486 return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
487 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
488 (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
489 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
490 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
491 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
492 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
493}
494
495enum drbd_state_rv
496drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
497 union drbd_state mask, union drbd_state val)
498{
499 unsigned long flags;
500 union drbd_state os, ns;
501 enum drbd_state_rv rv;
502
503 spin_lock_irqsave(&mdev->req_lock, flags);
504 os = mdev->state;
505 ns.i = (os.i & ~mask.i) | val.i;
506 rv = _drbd_set_state(mdev, ns, f, NULL);
507 ns = mdev->state;
508 spin_unlock_irqrestore(&mdev->req_lock, flags);
509
510 return rv;
511}
512
513/**
514 * drbd_force_state() - Impose a change which happens outside our control on our state
515 * @mdev: DRBD device.
516 * @mask: mask of state bits to change.
517 * @val: value of new state bits.
518 */
519void drbd_force_state(struct drbd_conf *mdev,
520 union drbd_state mask, union drbd_state val)
521{
522 drbd_change_state(mdev, CS_HARD, mask, val);
523}
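/* Editor's note: illustrative sketch, not part of the original source.
 * A typical forced change uses the NS() mask/val helper, the same way
 * tl_release() reacts to a protocol error above:
 *
 *	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
 */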
524
525static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
526static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
527 union drbd_state,
528 union drbd_state);
529static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
530 union drbd_state ns, const char **warn_sync_abort);
531int drbd_send_state_req(struct drbd_conf *,
532 union drbd_state, union drbd_state);
533
534static enum drbd_state_rv
535_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
536 union drbd_state val)
537{
538 union drbd_state os, ns;
539 unsigned long flags;
540 enum drbd_state_rv rv;
541
542 if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
543 return SS_CW_SUCCESS;
544
545 if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
546 return SS_CW_FAILED_BY_PEER;
547
548 rv = 0;
549 spin_lock_irqsave(&mdev->req_lock, flags);
550 os = mdev->state;
551 ns.i = (os.i & ~mask.i) | val.i;
552 ns = sanitize_state(mdev, os, ns, NULL);
553
554 if (!cl_wide_st_chg(mdev, os, ns))
555 rv = SS_CW_NO_NEED;
556 if (!rv) {
557 rv = is_valid_state(mdev, ns);
558 if (rv == SS_SUCCESS) {
559 rv = is_valid_state_transition(mdev, ns, os);
560 if (rv == SS_SUCCESS)
561 rv = SS_UNKNOWN_ERROR; /* continue waiting, otherwise fail. */
562 }
563 }
564 spin_unlock_irqrestore(&mdev->req_lock, flags);
565
566 return rv;
567}
568
569/**
570 * drbd_req_state() - Perform a possibly cluster-wide state change
571 * @mdev: DRBD device.
572 * @mask: mask of state bits to change.
573 * @val: value of new state bits.
574 * @f: flags
575 *
576 * Should not be called directly, use drbd_request_state() or
577 * _drbd_request_state().
578 */
579static enum drbd_state_rv
580drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
581 union drbd_state val, enum chg_state_flags f)
582{
583 struct completion done;
584 unsigned long flags;
585 union drbd_state os, ns;
586 enum drbd_state_rv rv;
587
588 init_completion(&done);
589
590 if (f & CS_SERIALIZE)
591 mutex_lock(&mdev->state_mutex);
592
593 spin_lock_irqsave(&mdev->req_lock, flags);
594 os = mdev->state;
595 ns.i = (os.i & ~mask.i) | val.i;
596 ns = sanitize_state(mdev, os, ns, NULL);
597
598 if (cl_wide_st_chg(mdev, os, ns)) {
599 rv = is_valid_state(mdev, ns);
600 if (rv == SS_SUCCESS)
601 rv = is_valid_state_transition(mdev, ns, os);
602 spin_unlock_irqrestore(&mdev->req_lock, flags);
603
604 if (rv < SS_SUCCESS) {
605 if (f & CS_VERBOSE)
606 print_st_err(mdev, os, ns, rv);
607 goto abort;
608 }
609
610 drbd_state_lock(mdev);
611 if (!drbd_send_state_req(mdev, mask, val)) {
612 drbd_state_unlock(mdev);
613 rv = SS_CW_FAILED_BY_PEER;
614 if (f & CS_VERBOSE)
615 print_st_err(mdev, os, ns, rv);
616 goto abort;
617 }
618
619 wait_event(mdev->state_wait,
620 (rv = _req_st_cond(mdev, mask, val)));
621
622 if (rv < SS_SUCCESS) {
623 drbd_state_unlock(mdev);
624 if (f & CS_VERBOSE)
625 print_st_err(mdev, os, ns, rv);
626 goto abort;
627 }
628 spin_lock_irqsave(&mdev->req_lock, flags);
629 os = mdev->state;
630 ns.i = (os.i & ~mask.i) | val.i;
631 rv = _drbd_set_state(mdev, ns, f, &done);
632 drbd_state_unlock(mdev);
633 } else {
634 rv = _drbd_set_state(mdev, ns, f, &done);
635 }
636
637 spin_unlock_irqrestore(&mdev->req_lock, flags);
638
639 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
640 D_ASSERT(current != mdev->worker.task);
641 wait_for_completion(&done);
642 }
643
644abort:
645 if (f & CS_SERIALIZE)
646 mutex_unlock(&mdev->state_mutex);
647
648 return rv;
649}
650
651/**
652 * _drbd_request_state() - Request a state change (with flags)
653 * @mdev: DRBD device.
654 * @mask: mask of state bits to change.
655 * @val: value of new state bits.
656 * @f: flags
657 *
658 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
659 * flag, or when logging of failed state change requests is not desired.
660 */
661enum drbd_state_rv
662_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
663 union drbd_state val, enum chg_state_flags f)
664{
665 enum drbd_state_rv rv;
666
667 wait_event(mdev->state_wait,
668 (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
669
670 return rv;
671}
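/* Editor's note: illustrative sketch, not part of the original source.
 * Callers elsewhere in this file request a (possibly cluster-wide) change
 * the same way, e.g. after a failed bitmap write in abw_start_sync():
 *
 *	_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
 */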
672
673static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
674{
675 dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
676 name,
677 drbd_conn_str(ns.conn),
678 drbd_role_str(ns.role),
679 drbd_role_str(ns.peer),
680 drbd_disk_str(ns.disk),
681 drbd_disk_str(ns.pdsk),
682 is_susp(ns) ? 's' : 'r',
683 ns.aftr_isp ? 'a' : '-',
684 ns.peer_isp ? 'p' : '-',
685 ns.user_isp ? 'u' : '-'
686 );
687}
688
689void print_st_err(struct drbd_conf *mdev, union drbd_state os,
690 union drbd_state ns, enum drbd_state_rv err)
691{
692 if (err == SS_IN_TRANSIENT_STATE)
693 return;
694 dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
695 print_st(mdev, " state", os);
696 print_st(mdev, "wanted", ns);
697}
698
699
700/**
701 * is_valid_state() - Returns an SS_ error code if ns is not valid
702 * @mdev: DRBD device.
703 * @ns: State to consider.
704 */
705static enum drbd_state_rv
706is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
707{
708 /* See drbd_state_sw_errors in drbd_strings.c */
709
710 enum drbd_fencing_p fp;
711 enum drbd_state_rv rv = SS_SUCCESS;
712
713 fp = FP_DONT_CARE;
714 if (get_ldev(mdev)) {
715 fp = mdev->ldev->dc.fencing;
716 put_ldev(mdev);
717 }
718
719 if (get_net_conf(mdev)) {
720 if (!mdev->net_conf->two_primaries &&
721 ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
722 rv = SS_TWO_PRIMARIES;
723 put_net_conf(mdev);
724 }
725
726 if (rv <= 0)
727 /* already found a reason to abort */;
728 else if (ns.role == R_SECONDARY && mdev->open_cnt)
729 rv = SS_DEVICE_IN_USE;
730
731 else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
732 rv = SS_NO_UP_TO_DATE_DISK;
733
734 else if (fp >= FP_RESOURCE &&
735 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
736 rv = SS_PRIMARY_NOP;
737
738 else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
739 rv = SS_NO_UP_TO_DATE_DISK;
740
741 else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
742 rv = SS_NO_LOCAL_DISK;
743
744 else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
745 rv = SS_NO_REMOTE_DISK;
746
747 else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
748 rv = SS_NO_UP_TO_DATE_DISK;
749
750 else if ((ns.conn == C_CONNECTED ||
751 ns.conn == C_WF_BITMAP_S ||
752 ns.conn == C_SYNC_SOURCE ||
753 ns.conn == C_PAUSED_SYNC_S) &&
754 ns.disk == D_OUTDATED)
755 rv = SS_CONNECTED_OUTDATES;
756
757 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
758 (mdev->sync_conf.verify_alg[0] == 0))
759 rv = SS_NO_VERIFY_ALG;
760
761 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
762 mdev->agreed_pro_version < 88)
763 rv = SS_NOT_SUPPORTED;
764
765 return rv;
766}
767
768/**
769 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
770 * @mdev: DRBD device.
771 * @ns: new state.
772 * @os: old state.
773 */
774static enum drbd_state_rv
775is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
776 union drbd_state os)
777{
778 enum drbd_state_rv rv = SS_SUCCESS;
779
780 if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
781 os.conn > C_CONNECTED)
782 rv = SS_RESYNC_RUNNING;
783
784 if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
785 rv = SS_ALREADY_STANDALONE;
786
787 if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
788 rv = SS_IS_DISKLESS;
789
790 if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
791 rv = SS_NO_NET_CONFIG;
792
793 if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
794 rv = SS_LOWER_THAN_OUTDATED;
795
796 if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
797 rv = SS_IN_TRANSIENT_STATE;
798
799 if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
800 rv = SS_IN_TRANSIENT_STATE;
801
802 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
803 rv = SS_NEED_CONNECTION;
804
805 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
806 ns.conn != os.conn && os.conn > C_CONNECTED)
807 rv = SS_RESYNC_RUNNING;
808
809 if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
810 os.conn < C_CONNECTED)
811 rv = SS_NEED_CONNECTION;
812
813 if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
814 && os.conn < C_WF_REPORT_PARAMS)
815 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
816
817 return rv;
818}
819
820/**
821 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
822 * @mdev: DRBD device.
823 * @os: old state.
824 * @ns: new state.
825 * @warn_sync_abort:
826 *
827 * When we lose connection, we have to set the state of the peer's disk (pdsk)
828 * to D_UNKNOWN. This rule and many more along those lines are in this function.
829 */
830static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
831 union drbd_state ns, const char **warn_sync_abort)
832{
833 enum drbd_fencing_p fp;
834 enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
835
836 fp = FP_DONT_CARE;
837 if (get_ldev(mdev)) {
838 fp = mdev->ldev->dc.fencing;
839 put_ldev(mdev);
840 }
841
842 /* Disallow Network errors to configure a device's network part */
843 if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
844 os.conn <= C_DISCONNECTING)
845 ns.conn = os.conn;
846
847 /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
848 * If you try to go into some Sync* state, that shall fail (elsewhere). */
849 if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
850 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
851 ns.conn = os.conn;
852
853 /* we cannot fail (again) if we already detached */
854 if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
855 ns.disk = D_DISKLESS;
856
857 /* if we are only D_ATTACHING yet,
858 * we can (and should) go directly to D_DISKLESS. */
859 if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
860 ns.disk = D_DISKLESS;
861
862 /* After C_DISCONNECTING only C_STANDALONE may follow */
863 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
864 ns.conn = os.conn;
865
866 if (ns.conn < C_CONNECTED) {
867 ns.peer_isp = 0;
868 ns.peer = R_UNKNOWN;
869 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
870 ns.pdsk = D_UNKNOWN;
871 }
872
873 /* Clear the aftr_isp when becoming unconfigured */
874 if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
875 ns.aftr_isp = 0;
876
877 /* Abort resync if a disk fails/detaches */
878 if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
879 (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
880 if (warn_sync_abort)
881 *warn_sync_abort =
882 os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
883 "Online-verify" : "Resync";
884 ns.conn = C_CONNECTED;
885 }
886
887 /* Connection breaks down before we finished "Negotiating" */
888 if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
889 get_ldev_if_state(mdev, D_NEGOTIATING)) {
890 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
891 ns.disk = mdev->new_state_tmp.disk;
892 ns.pdsk = mdev->new_state_tmp.pdsk;
893 } else {
894 dev_alert(DEV, "Connection lost while negotiating, no data!\n");
895 ns.disk = D_DISKLESS;
896 ns.pdsk = D_UNKNOWN;
897 }
898 put_ldev(mdev);
899 }
900
901 /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
902 if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
903 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
904 ns.disk = D_UP_TO_DATE;
905 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
906 ns.pdsk = D_UP_TO_DATE;
907 }
908
909 /* Implications of the connection state on the disk states */
910 disk_min = D_DISKLESS;
911 disk_max = D_UP_TO_DATE;
912 pdsk_min = D_INCONSISTENT;
913 pdsk_max = D_UNKNOWN;
914 switch ((enum drbd_conns)ns.conn) {
915 case C_WF_BITMAP_T:
916 case C_PAUSED_SYNC_T:
917 case C_STARTING_SYNC_T:
918 case C_WF_SYNC_UUID:
919 case C_BEHIND:
920 disk_min = D_INCONSISTENT;
921 disk_max = D_OUTDATED;
922 pdsk_min = D_UP_TO_DATE;
923 pdsk_max = D_UP_TO_DATE;
924 break;
925 case C_VERIFY_S:
926 case C_VERIFY_T:
927 disk_min = D_UP_TO_DATE;
928 disk_max = D_UP_TO_DATE;
929 pdsk_min = D_UP_TO_DATE;
930 pdsk_max = D_UP_TO_DATE;
931 break;
932 case C_CONNECTED:
933 disk_min = D_DISKLESS;
934 disk_max = D_UP_TO_DATE;
935 pdsk_min = D_DISKLESS;
936 pdsk_max = D_UP_TO_DATE;
937 break;
938 case C_WF_BITMAP_S:
939 case C_PAUSED_SYNC_S:
940 case C_STARTING_SYNC_S:
941 case C_AHEAD:
942 disk_min = D_UP_TO_DATE;
943 disk_max = D_UP_TO_DATE;
944 pdsk_min = D_INCONSISTENT;
945 pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
946 break;
947 case C_SYNC_TARGET:
948 disk_min = D_INCONSISTENT;
949 disk_max = D_INCONSISTENT;
950 pdsk_min = D_UP_TO_DATE;
951 pdsk_max = D_UP_TO_DATE;
952 break;
953 case C_SYNC_SOURCE:
954 disk_min = D_UP_TO_DATE;
955 disk_max = D_UP_TO_DATE;
956 pdsk_min = D_INCONSISTENT;
957 pdsk_max = D_INCONSISTENT;
958 break;
959 case C_STANDALONE:
960 case C_DISCONNECTING:
961 case C_UNCONNECTED:
962 case C_TIMEOUT:
963 case C_BROKEN_PIPE:
964 case C_NETWORK_FAILURE:
965 case C_PROTOCOL_ERROR:
966 case C_TEAR_DOWN:
967 case C_WF_CONNECTION:
968 case C_WF_REPORT_PARAMS:
969 case C_MASK:
970 break;
971 }
972 if (ns.disk > disk_max)
973 ns.disk = disk_max;
974
975 if (ns.disk < disk_min) {
976 dev_warn(DEV, "Implicitly set disk from %s to %s\n",
977 drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
978 ns.disk = disk_min;
979 }
980 if (ns.pdsk > pdsk_max)
981 ns.pdsk = pdsk_max;
982
983 if (ns.pdsk < pdsk_min) {
984 dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
985 drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
986 ns.pdsk = pdsk_min;
987 }
988
989 if (fp == FP_STONITH &&
990 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
991 !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
992 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
993
994 if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
995 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
996 !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
997 ns.susp_nod = 1; /* Suspend IO while no accessible data is available */
998
999 if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
1000 if (ns.conn == C_SYNC_SOURCE)
1001 ns.conn = C_PAUSED_SYNC_S;
1002 if (ns.conn == C_SYNC_TARGET)
1003 ns.conn = C_PAUSED_SYNC_T;
1004 } else {
1005 if (ns.conn == C_PAUSED_SYNC_S)
1006 ns.conn = C_SYNC_SOURCE;
1007 if (ns.conn == C_PAUSED_SYNC_T)
1008 ns.conn = C_SYNC_TARGET;
1009 }
1010
1011 return ns;
1012}
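/* Editor's note: illustrative example, not part of the original source.
 * One implicit correction performed above: if the new connection state is
 * below C_CONNECTED, the peer's disk state cannot be known. So a request of
 *
 *	ns.conn = C_WF_CONNECTION; ns.pdsk = D_UP_TO_DATE;
 *
 * comes back from sanitize_state() with ns.pdsk forced to D_UNKNOWN
 * (and ns.peer to R_UNKNOWN).
 */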
1013
1014/* helper for __drbd_set_state */
1015static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
1016{
1017 if (mdev->agreed_pro_version < 90)
1018 mdev->ov_start_sector = 0;
1019 mdev->rs_total = drbd_bm_bits(mdev);
1020 mdev->ov_position = 0;
1021 if (cs == C_VERIFY_T) {
1022 /* starting online verify from an arbitrary position
1023 * does not fit well into the existing protocol.
1024 * on C_VERIFY_T, we initialize ov_left and friends
1025 * implicitly in receive_DataRequest once the
1026 * first P_OV_REQUEST is received */
1027 mdev->ov_start_sector = ~(sector_t)0;
1028 } else {
1029 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
1030 if (bit >= mdev->rs_total) {
1031 mdev->ov_start_sector =
1032 BM_BIT_TO_SECT(mdev->rs_total - 1);
1033 mdev->rs_total = 1;
1034 } else
1035 mdev->rs_total -= bit;
1036 mdev->ov_position = mdev->ov_start_sector;
1037 }
1038 mdev->ov_left = mdev->rs_total;
1039}
1040
1041static void drbd_resume_al(struct drbd_conf *mdev)
1042{
1043 if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
1044 dev_info(DEV, "Resumed AL updates\n");
1045}
1046
1047/**
1048 * __drbd_set_state() - Set a new DRBD state
1049 * @mdev: DRBD device.
1050 * @ns: new state.
1051 * @flags: Flags
1052 * @done: Optional completion, that will get completed after the after_state_ch() finished
1053 *
1054 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
1055 */
1056enum drbd_state_rv
1057__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1058 enum chg_state_flags flags, struct completion *done)
1059{
1060 union drbd_state os;
1061 enum drbd_state_rv rv = SS_SUCCESS;
1062 const char *warn_sync_abort = NULL;
1063 struct after_state_chg_work *ascw;
1064
1065 os = mdev->state;
1066
1067 ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
1068
1069 if (ns.i == os.i)
1070 return SS_NOTHING_TO_DO;
1071
1072 if (!(flags & CS_HARD)) {
1073 /* pre-state-change checks ; only look at ns */
1074 /* See drbd_state_sw_errors in drbd_strings.c */
1075
1076 rv = is_valid_state(mdev, ns);
1077 if (rv < SS_SUCCESS) {
1078 /* If the old state was illegal as well, then let
1079 this happen...*/
1080
1081 if (is_valid_state(mdev, os) == rv)
1082 rv = is_valid_state_transition(mdev, ns, os);
1083 } else
1084 rv = is_valid_state_transition(mdev, ns, os);
1085 }
1086
1087 if (rv < SS_SUCCESS) {
1088 if (flags & CS_VERBOSE)
1089 print_st_err(mdev, os, ns, rv);
1090 return rv;
1091 }
1092
1093 if (warn_sync_abort)
1094 dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
1095
1096 {
1097 char *pbp, pb[300];
1098 pbp = pb;
1099 *pbp = 0;
1100 if (ns.role != os.role)
1101 pbp += sprintf(pbp, "role( %s -> %s ) ",
1102 drbd_role_str(os.role),
1103 drbd_role_str(ns.role));
1104 if (ns.peer != os.peer)
1105 pbp += sprintf(pbp, "peer( %s -> %s ) ",
1106 drbd_role_str(os.peer),
1107 drbd_role_str(ns.peer));
1108 if (ns.conn != os.conn)
1109 pbp += sprintf(pbp, "conn( %s -> %s ) ",
1110 drbd_conn_str(os.conn),
1111 drbd_conn_str(ns.conn));
1112 if (ns.disk != os.disk)
1113 pbp += sprintf(pbp, "disk( %s -> %s ) ",
1114 drbd_disk_str(os.disk),
1115 drbd_disk_str(ns.disk));
1116 if (ns.pdsk != os.pdsk)
1117 pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
1118 drbd_disk_str(os.pdsk),
1119 drbd_disk_str(ns.pdsk));
1120 if (is_susp(ns) != is_susp(os))
1121 pbp += sprintf(pbp, "susp( %d -> %d ) ",
1122 is_susp(os),
1123 is_susp(ns));
1124 if (ns.aftr_isp != os.aftr_isp)
1125 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
1126 os.aftr_isp,
1127 ns.aftr_isp);
1128 if (ns.peer_isp != os.peer_isp)
1129 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
1130 os.peer_isp,
1131 ns.peer_isp);
1132 if (ns.user_isp != os.user_isp)
1133 pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
1134 os.user_isp,
1135 ns.user_isp);
1136 dev_info(DEV, "%s\n", pb);
1137 }
1138
1139 /* solve the race between becoming unconfigured,
1140 * worker doing the cleanup, and
1141 * admin reconfiguring us:
1142 * on (re)configure, first set CONFIG_PENDING,
1143 * then wait for a potentially exiting worker,
1144 * start the worker, and schedule one no_op.
1145 * then proceed with configuration.
1146 */
1147 if (ns.disk == D_DISKLESS &&
1148 ns.conn == C_STANDALONE &&
1149 ns.role == R_SECONDARY &&
1150 !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1151 set_bit(DEVICE_DYING, &mdev->flags);
1152
1153 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1154 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1155 * drbd_ldev_destroy() won't happen before our corresponding
1156 * after_state_ch works run, where we put_ldev again. */
1157 if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1158 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1159 atomic_inc(&mdev->local_cnt);
1160
1161 mdev->state = ns;
1162 wake_up(&mdev->misc_wait);
1163 wake_up(&mdev->state_wait);
1164
1165 /* aborted verify run. log the last position */
1166 if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1167 ns.conn < C_CONNECTED) {
1168 mdev->ov_start_sector =
1169 BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
1170 dev_info(DEV, "Online Verify reached sector %llu\n",
1171 (unsigned long long)mdev->ov_start_sector);
1172 }
1173
1174 if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1175 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
1176 dev_info(DEV, "Syncer continues.\n");
1177 mdev->rs_paused += (long)jiffies
1178 -(long)mdev->rs_mark_time[mdev->rs_last_mark];
1179 if (ns.conn == C_SYNC_TARGET)
1180 mod_timer(&mdev->resync_timer, jiffies);
1181 }
1182
1183 if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
1184 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1185 dev_info(DEV, "Resync suspended\n");
1186 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
1187 }
1188
1189 if (os.conn == C_CONNECTED &&
1190 (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1191 unsigned long now = jiffies;
1192 int i;
1193
1194 set_ov_position(mdev, ns.conn);
1195 mdev->rs_start = now;
1196 mdev->rs_last_events = 0;
1197 mdev->rs_last_sect_ev = 0;
1198 mdev->ov_last_oos_size = 0;
1199 mdev->ov_last_oos_start = 0;
1200
1201 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1202 mdev->rs_mark_left[i] = mdev->ov_left;
1203 mdev->rs_mark_time[i] = now;
1204 }
1205
1206 drbd_rs_controller_reset(mdev);
1207
1208 if (ns.conn == C_VERIFY_S) {
1209 dev_info(DEV, "Starting Online Verify from sector %llu\n",
1210 (unsigned long long)mdev->ov_position);
1211 mod_timer(&mdev->resync_timer, jiffies);
1212 }
1213 }
1214
1215 if (get_ldev(mdev)) {
1216 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1217 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1218 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1219
1220 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1221 mdf |= MDF_CRASHED_PRIMARY;
1222 if (mdev->state.role == R_PRIMARY ||
1223 (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1224 mdf |= MDF_PRIMARY_IND;
1225 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1226 mdf |= MDF_CONNECTED_IND;
1227 if (mdev->state.disk > D_INCONSISTENT)
1228 mdf |= MDF_CONSISTENT;
1229 if (mdev->state.disk > D_OUTDATED)
1230 mdf |= MDF_WAS_UP_TO_DATE;
1231 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1232 mdf |= MDF_PEER_OUT_DATED;
1233 if (mdf != mdev->ldev->md.flags) {
1234 mdev->ldev->md.flags = mdf;
1235 drbd_md_mark_dirty(mdev);
1236 }
1237 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1238 drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1239 put_ldev(mdev);
1240 }
1241
1242 /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1243 if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1244 os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1245 set_bit(CONSIDER_RESYNC, &mdev->flags);
1246
1247 /* Receiver should clean up itself */
1248 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1249 drbd_thread_stop_nowait(&mdev->receiver);
1250
1251 /* Now the receiver finished cleaning up itself, it should die */
1252 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1253 drbd_thread_stop_nowait(&mdev->receiver);
1254
1255 /* Upon network failure, we need to restart the receiver. */
1256 if (os.conn > C_TEAR_DOWN &&
1257 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1258 drbd_thread_restart_nowait(&mdev->receiver);
1259
1260 /* Resume AL writing if we get a connection */
1261 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1262 drbd_resume_al(mdev);
1263
1264 /* Start a new epoch in case we start to mirror write requests */
1265 if (!drbd_should_do_remote(os) && drbd_should_do_remote(ns))
1266 tl_forget(mdev);
1267
1268 /* Do not add local-only requests to an epoch with mirrored requests */
1269 if (drbd_should_do_remote(os) && !drbd_should_do_remote(ns))
1270 set_bit(CREATE_BARRIER, &mdev->flags);
1271
b411b363
PR
1272 ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1273 if (ascw) {
1274 ascw->os = os;
1275 ascw->ns = ns;
1276 ascw->flags = flags;
1277 ascw->w.cb = w_after_state_ch;
1278 ascw->done = done;
1279 drbd_queue_work(&mdev->data.work, &ascw->w);
1280 } else {
1281 dev_warn(DEV, "Could not kmalloc an ascw\n");
1282 }
1283
1284 return rv;
1285}
1286
1287static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1288{
1289 struct after_state_chg_work *ascw =
1290 container_of(w, struct after_state_chg_work, w);
1291 after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1292 if (ascw->flags & CS_WAIT_COMPLETE) {
1293 D_ASSERT(ascw->done != NULL);
1294 complete(ascw->done);
1295 }
1296 kfree(ascw);
1297
1298 return 1;
1299}
1300
1301static void abw_start_sync(struct drbd_conf *mdev, int rv)
1302{
1303 if (rv) {
1304 dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
1305 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1306 return;
1307 }
1308
1309 switch (mdev->state.conn) {
1310 case C_STARTING_SYNC_T:
1311 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1312 break;
1313 case C_STARTING_SYNC_S:
1314 drbd_start_resync(mdev, C_SYNC_SOURCE);
1315 break;
1316 }
1317}
1318
1319int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
1320{
1321 int rv;
1322
1323 D_ASSERT(current == mdev->worker.task);
1324
1325 /* open coded non-blocking drbd_suspend_io(mdev); */
1326 set_bit(SUSPEND_IO, &mdev->flags);
1327 if (!is_susp(mdev->state))
1328 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
1329
1330 drbd_bm_lock(mdev, why);
1331 rv = io_fn(mdev);
1332 drbd_bm_unlock(mdev);
1333
1334 drbd_resume_io(mdev);
1335
1336 return rv;
1337}
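/* Editor's note: illustrative sketch, not part of the original source.
 * after_state_ch() below uses this helper to flush the bitmap to disk
 * synchronously from the worker context, e.g. on demote:
 *
 *	drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote");
 */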
1338
1339/**
1340 * after_state_ch() - Perform after state change actions that may sleep
1341 * @mdev: DRBD device.
1342 * @os: old state.
1343 * @ns: new state.
1344 * @flags: Flags
1345 */
1346static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1347 union drbd_state ns, enum chg_state_flags flags)
1348{
1349 enum drbd_fencing_p fp;
1350 enum drbd_req_event what = nothing;
1351 union drbd_state nsm = (union drbd_state){ .i = -1 };
1352
1353 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1354 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1355 if (mdev->p_uuid)
1356 mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1357 }
1358
1359 fp = FP_DONT_CARE;
1360 if (get_ldev(mdev)) {
1361 fp = mdev->ldev->dc.fencing;
1362 put_ldev(mdev);
1363 }
1364
1365 /* Inform userspace about the change... */
1366 drbd_bcast_state(mdev, ns);
1367
1368 if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1369 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1370 drbd_khelper(mdev, "pri-on-incon-degr");
1371
1372 /* Here we have the actions that are performed after a
1373 state change. This function might sleep */
1374
1375 nsm.i = -1;
1376 if (ns.susp_nod) {
1377 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1378 what = resend;
1379
1380 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
1381 what = restart_frozen_disk_io;
1382
1383 if (what != nothing)
1384 nsm.susp_nod = 0;
1385 }
1386
1387 if (ns.susp_fen) {
1388 /* case1: The outdate peer handler is successful: */
1389 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
1390 tl_clear(mdev);
1391 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1392 drbd_uuid_new_current(mdev);
1393 clear_bit(NEW_CUR_UUID, &mdev->flags);
1394 }
1395 spin_lock_irq(&mdev->req_lock);
1396 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1397 spin_unlock_irq(&mdev->req_lock);
1398 }
1399 /* case2: The connection was established again: */
1400 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1401 clear_bit(NEW_CUR_UUID, &mdev->flags);
1402 what = resend;
1403 nsm.susp_fen = 0;
1404 }
1405 }
1406
1407 if (what != nothing) {
1408 spin_lock_irq(&mdev->req_lock);
1409 _tl_restart(mdev, what);
1410 nsm.i &= mdev->state.i;
1411 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
1412 spin_unlock_irq(&mdev->req_lock);
1413 }
1414
1415 /* Became sync source. With protocol >= 96, we still need to send out
1416 * the sync uuid now. Need to do that before any drbd_send_state, or
1417 * the other side may go "paused sync" before receiving the sync uuids,
1418 * which is unexpected. */
1419 if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1420 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1421 mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
1422 drbd_gen_and_send_sync_uuid(mdev);
1423 put_ldev(mdev);
1424 }
1425
1426 /* Do not change the order of the if above and the two below... */
1427 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
1428 drbd_send_uuids(mdev);
1429 drbd_send_state(mdev);
1430 }
1431 /* No point in queuing send_bitmap if we don't have a connection
1432 * anymore, so check also the _current_ state, not only the new state
1433 * at the time this work was queued. */
1434 if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
1435 mdev->state.conn == C_WF_BITMAP_S)
1436 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
1437 "send_bitmap (WFBitMapS)");
1438
1439 /* Lost contact to peer's copy of the data */
1440 if ((os.pdsk >= D_INCONSISTENT &&
1441 os.pdsk != D_UNKNOWN &&
1442 os.pdsk != D_OUTDATED)
1443 && (ns.pdsk < D_INCONSISTENT ||
1444 ns.pdsk == D_UNKNOWN ||
1445 ns.pdsk == D_OUTDATED)) {
1446 if (get_ldev(mdev)) {
1447 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1448 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1449 if (is_susp(mdev->state)) {
1450 set_bit(NEW_CUR_UUID, &mdev->flags);
1451 } else {
1452 drbd_uuid_new_current(mdev);
1453 drbd_send_uuids(mdev);
1454 }
1455 }
1456 put_ldev(mdev);
1457 }
1458 }
1459
1460 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1461 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
1462 drbd_uuid_new_current(mdev);
1463 drbd_send_uuids(mdev);
1464 }
1465
1466 /* D_DISKLESS Peer becomes secondary */
1467 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1468 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote diskless peer");
1469 put_ldev(mdev);
1470 }
1471
1472 /* Write out all changed bits on demote.
1473 * Though, no need to do that just yet
1474 * if there is a resync going on still */
1475 if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1476 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
1477 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote");
1478 put_ldev(mdev);
1479 }
1480
1481 /* Last part of the attaching process ... */
1482 if (ns.conn >= C_CONNECTED &&
1483 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
1484 drbd_send_sizes(mdev, 0, 0); /* to start sync... */
1485 drbd_send_uuids(mdev);
1486 drbd_send_state(mdev);
1487 }
1488
1489 /* We want to pause/continue resync, tell peer. */
1490 if (ns.conn >= C_CONNECTED &&
1491 ((os.aftr_isp != ns.aftr_isp) ||
1492 (os.user_isp != ns.user_isp)))
1493 drbd_send_state(mdev);
1494
1495 /* In case one of the isp bits got set, suspend other devices. */
1496 if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1497 (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1498 suspend_other_sg(mdev);
1499
1500 /* Make sure the peer gets informed about eventual state
1501 changes (ISP bits) while we were in WFReportParams. */
1502 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1503 drbd_send_state(mdev);
1504
1505 if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1506 drbd_send_state(mdev);
1507
1508 /* We are in the progress to start a full sync... */
1509 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1510 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1511 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");
1512
1513 /* We are invalidating our self... */
1514 if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1515 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1516 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
1517
1518 /* first half of local IO error, failure to attach,
1519 * or administrative detach */
1520 if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1521 enum drbd_io_error_p eh;
1522 int was_io_error;
1523 /* corresponding get_ldev was in __drbd_set_state, to serialize
1524 * our cleanup here with the transition to D_DISKLESS,
1525 * so it is safe to dereference ldev here. */
1526 eh = mdev->ldev->dc.on_io_error;
1527 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1528
1529 /* current state still has to be D_FAILED,
1530 * there is only one way out: to D_DISKLESS,
1531 * and that may only happen after our put_ldev below. */
1532 if (mdev->state.disk != D_FAILED)
1533 dev_err(DEV,
1534 "ASSERT FAILED: disk is %s during detach\n",
1535 drbd_disk_str(mdev->state.disk));
1536
1537 if (drbd_send_state(mdev))
1538 dev_warn(DEV, "Notified peer that I am detaching my disk\n");
1539 else
1540 dev_err(DEV, "Sending state for detaching disk failed\n");
1541
1542 drbd_rs_cancel_all(mdev);
1543
1544 /* In case we want to get something to stable storage still,
1545 * this may be the last chance.
1546 * Following put_ldev may transition to D_DISKLESS. */
1547 drbd_md_sync(mdev);
1548 put_ldev(mdev);
1549
1550 if (was_io_error && eh == EP_CALL_HELPER)
1551 drbd_khelper(mdev, "local-io-error");
1552 }
1553
1554 /* second half of local IO error, failure to attach,
1555 * or administrative detach,
1556 * after local_cnt references have reached zero again */
1557 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1558 /* We must still be diskless,
1559 * re-attach has to be serialized with this! */
1560 if (mdev->state.disk != D_DISKLESS)
1561 dev_err(DEV,
1562 "ASSERT FAILED: disk is %s while going diskless\n",
1563 drbd_disk_str(mdev->state.disk));
1564
1565 mdev->rs_total = 0;
1566 mdev->rs_failed = 0;
1567 atomic_set(&mdev->rs_pending_cnt, 0);
1568
1569 if (drbd_send_state(mdev))
1570 dev_warn(DEV, "Notified peer that I'm now diskless.\n");
1571 else
1572 dev_err(DEV, "Sending state for being diskless failed\n");
1573 /* corresponding get_ldev in __drbd_set_state
1574 * this may finally trigger drbd_ldev_destroy. */
1575 put_ldev(mdev);
1576 }
1577
1578 /* Disks got bigger while they were detached */
1579 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1580 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1581 if (ns.conn == C_CONNECTED)
1582 resync_after_online_grow(mdev);
1583 }
1584
1585 /* A resync finished or aborted, wake paused devices... */
1586 if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1587 (os.peer_isp && !ns.peer_isp) ||
1588 (os.user_isp && !ns.user_isp))
1589 resume_next_sg(mdev);
1590
1591 /* sync target done with resync. Explicitly notify peer, even though
1592 * it should (at least for non-empty resyncs) already know itself. */
1593 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1594 drbd_send_state(mdev);
1595
1596 /* This triggers bitmap writeout of potentially still unwritten pages
1597 * if the resync finished cleanly, or aborted because of peer disk
1598 * failure. Resync aborted because of connection failure does bitmap
1599 * writeout from drbd_disconnect.
1600 * For resync aborted because of local disk failure, we cannot do
1601 * any bitmap writeout anymore.
1602 */
1603 if (os.conn > C_CONNECTED && ns.conn == C_CONNECTED &&
1604 mdev->state.conn == C_CONNECTED && get_ldev(mdev)) {
1605 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
1606 put_ldev(mdev);
1607 }
1608
1609 /* free tl_hash if we Got thawed and are C_STANDALONE */
1610 if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
1611 drbd_free_tl_hash(mdev);
1612
1613 /* Upon network connection, we need to start the receiver */
1614 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1615 drbd_thread_start(&mdev->receiver);
1616
1617 /* Terminate worker thread if we are unconfigured - it will be
1618 restarted as needed... */
1619 if (ns.disk == D_DISKLESS &&
1620 ns.conn == C_STANDALONE &&
1621 ns.role == R_SECONDARY) {
1622 if (os.aftr_isp != ns.aftr_isp)
1623 resume_next_sg(mdev);
1624 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1625 if (test_bit(DEVICE_DYING, &mdev->flags))
1626 drbd_thread_stop_nowait(&mdev->worker);
1627 }
1628
1629 drbd_md_sync(mdev);
1630}
1631
1632
1633static int drbd_thread_setup(void *arg)
1634{
1635 struct drbd_thread *thi = (struct drbd_thread *) arg;
1636 struct drbd_conf *mdev = thi->mdev;
1637 unsigned long flags;
1638 int retval;
1639
1640restart:
1641 retval = thi->function(thi);
1642
1643 spin_lock_irqsave(&thi->t_lock, flags);
1644
1645 /* if the receiver has been "Exiting", the last thing it did
1646 * was set the conn state to "StandAlone",
1647 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1648 * and receiver thread will be "started".
1649 * drbd_thread_start needs to set "Restarting" in that case.
1650 * t_state check and assignment needs to be within the same spinlock,
1651 * so either thread_start sees Exiting, and can remap to Restarting,
1652 * or thread_start see None, and can proceed as normal.
1653 */
1654
1655 if (thi->t_state == Restarting) {
1656 dev_info(DEV, "Restarting %s\n", current->comm);
1657 thi->t_state = Running;
1658 spin_unlock_irqrestore(&thi->t_lock, flags);
1659 goto restart;
1660 }
1661
1662 thi->task = NULL;
1663 thi->t_state = None;
1664 smp_mb();
1665 complete(&thi->stop);
1666 spin_unlock_irqrestore(&thi->t_lock, flags);
1667
1668 dev_info(DEV, "Terminating %s\n", current->comm);
1669
1670 /* Release mod reference taken when thread was started */
1671 module_put(THIS_MODULE);
1672 return retval;
1673}
1674
1675static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1676 int (*func) (struct drbd_thread *))
1677{
1678 spin_lock_init(&thi->t_lock);
1679 thi->task = NULL;
1680 thi->t_state = None;
1681 thi->function = func;
1682 thi->mdev = mdev;
1683}
1684
1685int drbd_thread_start(struct drbd_thread *thi)
1686{
1687 struct drbd_conf *mdev = thi->mdev;
1688 struct task_struct *nt;
1689 unsigned long flags;
1690
1691 const char *me =
1692 thi == &mdev->receiver ? "receiver" :
1693 thi == &mdev->asender ? "asender" :
1694 thi == &mdev->worker ? "worker" : "NONSENSE";
1695
1696 /* is used from state engine doing drbd_thread_stop_nowait,
1697 * while holding the req lock irqsave */
1698 spin_lock_irqsave(&thi->t_lock, flags);
1699
1700 switch (thi->t_state) {
1701 case None:
1702 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1703 me, current->comm, current->pid);
1704
1705 /* Get ref on module for thread - this is released when thread exits */
1706 if (!try_module_get(THIS_MODULE)) {
1707 dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1708 spin_unlock_irqrestore(&thi->t_lock, flags);
81e84650 1709 return false;
b411b363
PR
1710 }
1711
1712 init_completion(&thi->stop);
1713 D_ASSERT(thi->task == NULL);
1714 thi->reset_cpu_mask = 1;
1715 thi->t_state = Running;
1716 spin_unlock_irqrestore(&thi->t_lock, flags);
1717	flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */
1718
1719 nt = kthread_create(drbd_thread_setup, (void *) thi,
1720 "drbd%d_%s", mdev_to_minor(mdev), me);
1721
1722 if (IS_ERR(nt)) {
1723 dev_err(DEV, "Couldn't start thread\n");
1724
1725 module_put(THIS_MODULE);
81e84650 1726 return false;
b411b363
PR
1727 }
1728 spin_lock_irqsave(&thi->t_lock, flags);
1729 thi->task = nt;
1730 thi->t_state = Running;
1731 spin_unlock_irqrestore(&thi->t_lock, flags);
1732 wake_up_process(nt);
1733 break;
1734 case Exiting:
1735 thi->t_state = Restarting;
1736 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1737 me, current->comm, current->pid);
1738 /* fall through */
1739 case Running:
1740 case Restarting:
1741 default:
1742 spin_unlock_irqrestore(&thi->t_lock, flags);
1743 break;
1744 }
1745
81e84650 1746 return true;
b411b363
PR
1747}
1748
1749
1750void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1751{
1752 unsigned long flags;
1753
1754 enum drbd_thread_state ns = restart ? Restarting : Exiting;
1755
1756 /* may be called from state engine, holding the req lock irqsave */
1757 spin_lock_irqsave(&thi->t_lock, flags);
1758
1759 if (thi->t_state == None) {
1760 spin_unlock_irqrestore(&thi->t_lock, flags);
1761 if (restart)
1762 drbd_thread_start(thi);
1763 return;
1764 }
1765
1766 if (thi->t_state != ns) {
1767 if (thi->task == NULL) {
1768 spin_unlock_irqrestore(&thi->t_lock, flags);
1769 return;
1770 }
1771
1772 thi->t_state = ns;
1773 smp_mb();
1774 init_completion(&thi->stop);
1775 if (thi->task != current)
1776 force_sig(DRBD_SIGKILL, thi->task);
1777
1778 }
1779
1780 spin_unlock_irqrestore(&thi->t_lock, flags);
1781
1782 if (wait)
1783 wait_for_completion(&thi->stop);
1784}
1785
1786#ifdef CONFIG_SMP
1787/**
1788 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1789 * @mdev: DRBD device.
1790 *
1791 * Forces all threads of a device onto the same CPU. This is beneficial for
1792	 * DRBD's performance. May be overridden by the user's configuration.
1793 */
1794void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1795{
1796 int ord, cpu;
1797
1798 /* user override. */
1799 if (cpumask_weight(mdev->cpu_mask))
1800 return;
1801
1802 ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1803 for_each_online_cpu(cpu) {
1804 if (ord-- == 0) {
1805 cpumask_set_cpu(cpu, mdev->cpu_mask);
1806 return;
1807 }
1808 }
1809 /* should not be reached */
1810 cpumask_setall(mdev->cpu_mask);
1811}
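/* Hedged illustration, not part of the original source: the modulo mapping
 * above spreads minors round-robin over the online CPUs.  With CPUs 0-3
 * online, minor 0 picks cpu0, minor 1 picks cpu1, minor 5 picks cpu1 again,
 * i.e. conceptually (hypothetical helper name):
 *
 *	static unsigned int pick_cpu_for_minor(unsigned int minor, unsigned int nr_online)
 *	{
 *		return minor % nr_online;	// index into the online CPU list
 *	}
 *
 * All three threads of that minor (receiver, worker, asender) then apply the
 * mask to themselves via drbd_thread_current_set_cpu() below.
 */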
1812
1813/**
1814 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1815 * @mdev: DRBD device.
1816 *
1817	 * Call this in the "main loop" of _all_ threads; no need for any mutex, current won't die
1818 * prematurely.
1819 */
1820void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1821{
1822 struct task_struct *p = current;
1823 struct drbd_thread *thi =
1824 p == mdev->asender.task ? &mdev->asender :
1825 p == mdev->receiver.task ? &mdev->receiver :
1826 p == mdev->worker.task ? &mdev->worker :
1827 NULL;
1828 ERR_IF(thi == NULL)
1829 return;
1830 if (!thi->reset_cpu_mask)
1831 return;
1832 thi->reset_cpu_mask = 0;
1833 set_cpus_allowed_ptr(p, mdev->cpu_mask);
1834}
1835#endif
1836
1837/* the appropriate socket mutex must be held already */
1838int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
0b70a13d 1839 enum drbd_packets cmd, struct p_header80 *h,
b411b363
PR
1840 size_t size, unsigned msg_flags)
1841{
1842 int sent, ok;
1843
81e84650
AG
1844 ERR_IF(!h) return false;
1845 ERR_IF(!size) return false;
b411b363
PR
1846
1847 h->magic = BE_DRBD_MAGIC;
1848 h->command = cpu_to_be16(cmd);
0b70a13d 1849 h->length = cpu_to_be16(size-sizeof(struct p_header80));
b411b363 1850
b411b363
PR
1851 sent = drbd_send(mdev, sock, h, size, msg_flags);
1852
1853 ok = (sent == size);
1854 if (!ok)
1855 dev_err(DEV, "short sent %s size=%d sent=%d\n",
1856 cmdname(cmd), (int)size, sent);
1857 return ok;
1858}
1859
1860/* don't pass the socket. we may only look at it
1861 * when we hold the appropriate socket mutex.
1862 */
1863int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
0b70a13d 1864 enum drbd_packets cmd, struct p_header80 *h, size_t size)
b411b363
PR
1865{
1866 int ok = 0;
1867 struct socket *sock;
1868
1869 if (use_data_socket) {
1870 mutex_lock(&mdev->data.mutex);
1871 sock = mdev->data.socket;
1872 } else {
1873 mutex_lock(&mdev->meta.mutex);
1874 sock = mdev->meta.socket;
1875 }
1876
1877 /* drbd_disconnect() could have called drbd_free_sock()
1878 * while we were waiting in down()... */
1879 if (likely(sock != NULL))
1880 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1881
1882 if (use_data_socket)
1883 mutex_unlock(&mdev->data.mutex);
1884 else
1885 mutex_unlock(&mdev->meta.mutex);
1886 return ok;
1887}
1888
1889int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1890 size_t size)
1891{
0b70a13d 1892 struct p_header80 h;
b411b363
PR
1893 int ok;
1894
1895 h.magic = BE_DRBD_MAGIC;
1896 h.command = cpu_to_be16(cmd);
1897 h.length = cpu_to_be16(size);
1898
1899 if (!drbd_get_data_sock(mdev))
1900 return 0;
1901
b411b363
PR
1902 ok = (sizeof(h) ==
1903 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1904 ok = ok && (size ==
1905 drbd_send(mdev, mdev->data.socket, data, size, 0));
1906
1907 drbd_put_data_sock(mdev);
1908
1909 return ok;
1910}
1911
1912int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1913{
8e26f9cc 1914 struct p_rs_param_95 *p;
b411b363
PR
1915 struct socket *sock;
1916 int size, rv;
1917 const int apv = mdev->agreed_pro_version;
1918
1919 size = apv <= 87 ? sizeof(struct p_rs_param)
1920 : apv == 88 ? sizeof(struct p_rs_param)
1921 + strlen(mdev->sync_conf.verify_alg) + 1
8e26f9cc
PR
1922 : apv <= 94 ? sizeof(struct p_rs_param_89)
1923 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
b411b363
PR
1924
1925 /* used from admin command context and receiver/worker context.
1926 * to avoid kmalloc, grab the socket right here,
1927 * then use the pre-allocated sbuf there */
1928 mutex_lock(&mdev->data.mutex);
1929 sock = mdev->data.socket;
1930
1931 if (likely(sock != NULL)) {
1932 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1933
8e26f9cc 1934 p = &mdev->data.sbuf.rs_param_95;
b411b363
PR
1935
1936 /* initialize verify_alg and csums_alg */
1937 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1938
1939 p->rate = cpu_to_be32(sc->rate);
8e26f9cc
PR
1940 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
1941 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
1942 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
1943 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
b411b363
PR
1944
1945 if (apv >= 88)
1946 strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1947 if (apv >= 89)
1948 strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1949
1950 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1951 } else
1952 rv = 0; /* not ok */
1953
1954 mutex_unlock(&mdev->data.mutex);
1955
1956 return rv;
1957}
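/* Hedged summary of the size/command ladder above (derived from the code,
 * not an addition to it): with agreed protocol version <= 87 only the bare
 * struct p_rs_param (essentially the resync rate) goes out as P_SYNC_PARAM;
 * apv 88 appends the verify_alg string to the same packet; apv 89..94 send
 * struct p_rs_param_89 (fixed-size verify_alg + csums_alg) as P_SYNC_PARAM89;
 * and apv >= 95 still use P_SYNC_PARAM89 but with struct p_rs_param_95, which
 * additionally carries c_plan_ahead / c_delay_target / c_fill_target /
 * c_max_rate.
 */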
1958
1959int drbd_send_protocol(struct drbd_conf *mdev)
1960{
1961 struct p_protocol *p;
cf14c2e9 1962 int size, cf, rv;
b411b363
PR
1963
1964 size = sizeof(struct p_protocol);
1965
1966 if (mdev->agreed_pro_version >= 87)
1967 size += strlen(mdev->net_conf->integrity_alg) + 1;
1968
1969 /* we must not recurse into our own queue,
1970 * as that is blocked during handshake */
1971 p = kmalloc(size, GFP_NOIO);
1972 if (p == NULL)
1973 return 0;
1974
1975 p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
1976 p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
1977 p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
1978 p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
b411b363
PR
1979 p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1980
cf14c2e9
PR
1981 cf = 0;
1982 if (mdev->net_conf->want_lose)
1983 cf |= CF_WANT_LOSE;
1984 if (mdev->net_conf->dry_run) {
1985 if (mdev->agreed_pro_version >= 92)
1986 cf |= CF_DRY_RUN;
1987 else {
1988 dev_err(DEV, "--dry-run is not supported by peer");
7ac314c8 1989 kfree(p);
148efa16 1990 return -1;
cf14c2e9
PR
1991 }
1992 }
1993 p->conn_flags = cpu_to_be32(cf);
1994
b411b363
PR
1995 if (mdev->agreed_pro_version >= 87)
1996 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1997
1998 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
0b70a13d 1999 (struct p_header80 *)p, size);
b411b363
PR
2000 kfree(p);
2001 return rv;
2002}
2003
2004int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
2005{
2006 struct p_uuids p;
2007 int i;
2008
2009 if (!get_ldev_if_state(mdev, D_NEGOTIATING))
2010 return 1;
2011
2012 for (i = UI_CURRENT; i < UI_SIZE; i++)
2013 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
2014
2015 mdev->comm_bm_set = drbd_bm_total_weight(mdev);
2016 p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
2017 uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
2018 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
2019 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2020 p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2021
2022 put_ldev(mdev);
2023
2024 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
0b70a13d 2025 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2026}
2027
2028int drbd_send_uuids(struct drbd_conf *mdev)
2029{
2030 return _drbd_send_uuids(mdev, 0);
2031}
2032
2033int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2034{
2035 return _drbd_send_uuids(mdev, 8);
2036}
2037
5a22db89 2038int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
b411b363
PR
2039{
2040 struct p_rs_uuid p;
5a22db89
LE
2041 u64 uuid;
2042
2043 D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
b411b363 2044
4a23f264 2045 uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
5a22db89
LE
2046 drbd_uuid_set(mdev, UI_BITMAP, uuid);
2047 drbd_md_sync(mdev);
2048 p.uuid = cpu_to_be64(uuid);
b411b363
PR
2049
2050 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
0b70a13d 2051 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2052}
2053
e89b591c 2054int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
b411b363
PR
2055{
2056 struct p_sizes p;
2057 sector_t d_size, u_size;
2058 int q_order_type;
2059 int ok;
2060
2061 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2062 D_ASSERT(mdev->ldev->backing_bdev);
2063 d_size = drbd_get_max_capacity(mdev->ldev);
2064 u_size = mdev->ldev->dc.disk_size;
2065 q_order_type = drbd_queue_order_type(mdev);
b411b363
PR
2066 put_ldev(mdev);
2067 } else {
2068 d_size = 0;
2069 u_size = 0;
2070 q_order_type = QUEUE_ORDERED_NONE;
2071 }
2072
2073 p.d_size = cpu_to_be64(d_size);
2074 p.u_size = cpu_to_be64(u_size);
2075 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
1816a2b4 2076 p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9);
e89b591c
PR
2077 p.queue_order_type = cpu_to_be16(q_order_type);
2078 p.dds_flags = cpu_to_be16(flags);
b411b363
PR
2079
2080 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
0b70a13d 2081 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2082 return ok;
2083}
2084
2085/**
2086 * drbd_send_state() - Sends the drbd state to the peer
2087 * @mdev: DRBD device.
2088 */
2089int drbd_send_state(struct drbd_conf *mdev)
2090{
2091 struct socket *sock;
2092 struct p_state p;
2093 int ok = 0;
2094
2095	/* Grab state lock so we won't send state if we're in the middle
2096	 * of a cluster-wide state change on another thread */
2097 drbd_state_lock(mdev);
2098
2099 mutex_lock(&mdev->data.mutex);
2100
2101 p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2102 sock = mdev->data.socket;
2103
2104 if (likely(sock != NULL)) {
2105 ok = _drbd_send_cmd(mdev, sock, P_STATE,
0b70a13d 2106 (struct p_header80 *)&p, sizeof(p), 0);
b411b363
PR
2107 }
2108
2109 mutex_unlock(&mdev->data.mutex);
2110
2111 drbd_state_unlock(mdev);
2112 return ok;
2113}
2114
2115int drbd_send_state_req(struct drbd_conf *mdev,
2116 union drbd_state mask, union drbd_state val)
2117{
2118 struct p_req_state p;
2119
2120 p.mask = cpu_to_be32(mask.i);
2121 p.val = cpu_to_be32(val.i);
2122
2123 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
0b70a13d 2124 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2125}
2126
bf885f8a 2127int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
b411b363
PR
2128{
2129 struct p_req_state_reply p;
2130
2131 p.retcode = cpu_to_be32(retcode);
2132
2133 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
0b70a13d 2134 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2135}
2136
2137int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2138 struct p_compressed_bm *p,
2139 struct bm_xfer_ctx *c)
2140{
2141 struct bitstream bs;
2142 unsigned long plain_bits;
2143 unsigned long tmp;
2144 unsigned long rl;
2145 unsigned len;
2146 unsigned toggle;
2147 int bits;
2148
2149 /* may we use this feature? */
2150 if ((mdev->sync_conf.use_rle == 0) ||
2151 (mdev->agreed_pro_version < 90))
2152 return 0;
2153
2154 if (c->bit_offset >= c->bm_bits)
2155 return 0; /* nothing to do. */
2156
2157	/* use at most this many bytes */
2158 bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2159 memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2160 /* plain bits covered in this code string */
2161 plain_bits = 0;
2162
2163 /* p->encoding & 0x80 stores whether the first run length is set.
2164 * bit offset is implicit.
2165 * start with toggle == 2 to be able to tell the first iteration */
2166 toggle = 2;
2167
2168	/* see how many plain bits we can stuff into one packet
2169 * using RLE and VLI. */
2170 do {
2171 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2172 : _drbd_bm_find_next(mdev, c->bit_offset);
2173 if (tmp == -1UL)
2174 tmp = c->bm_bits;
2175 rl = tmp - c->bit_offset;
2176
2177 if (toggle == 2) { /* first iteration */
2178 if (rl == 0) {
2179 /* the first checked bit was set,
2180 * store start value, */
2181 DCBP_set_start(p, 1);
2182 /* but skip encoding of zero run length */
2183 toggle = !toggle;
2184 continue;
2185 }
2186 DCBP_set_start(p, 0);
2187 }
2188
2189 /* paranoia: catch zero runlength.
2190 * can only happen if bitmap is modified while we scan it. */
2191 if (rl == 0) {
2192 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2193 "t:%u bo:%lu\n", toggle, c->bit_offset);
2194 return -1;
2195 }
2196
2197 bits = vli_encode_bits(&bs, rl);
2198 if (bits == -ENOBUFS) /* buffer full */
2199 break;
2200 if (bits <= 0) {
2201 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2202 return 0;
2203 }
2204
2205 toggle = !toggle;
2206 plain_bits += rl;
2207 c->bit_offset = tmp;
2208 } while (c->bit_offset < c->bm_bits);
2209
2210 len = bs.cur.b - p->code + !!bs.cur.bit;
2211
2212 if (plain_bits < (len << 3)) {
2213 /* incompressible with this method.
2214 * we need to rewind both word and bit position. */
2215 c->bit_offset -= plain_bits;
2216 bm_xfer_ctx_bit_to_word_offset(c);
2217 c->bit_offset = c->word_offset * BITS_PER_LONG;
2218 return 0;
2219 }
2220
2221 /* RLE + VLI was able to compress it just fine.
2222 * update c->word_offset. */
2223 bm_xfer_ctx_bit_to_word_offset(c);
2224
2225 /* store pad_bits */
2226 DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2227
2228 return len;
2229}
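/* Worked example (illustrative only): assume the relevant bitmap slice is
 *	00000 1111 0000000000000 11...
 * i.e. runs of 5 clear, 4 set, 13 clear, ... bits.  Since the first run is
 * of clear bits, DCBP_set_start(p, 0) is recorded, and the run lengths
 * 5, 4, 13, ... are VLI-encoded back to back into p->code until either the
 * bitmap is exhausted or BM_PACKET_VLI_BYTES_MAX is reached.  If the code
 * string would take at least as many bits as the plain bitmap span it
 * covers, the function rewinds the transfer context and returns 0, so the
 * caller falls back to sending plain bitmap words instead.
 */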
2230
f70af118
AG
2231/**
2232 * send_bitmap_rle_or_plain
2233 *
2234 * Return 0 when done, 1 when another iteration is needed, and a negative error
2235 * code upon failure.
2236 */
2237static int
b411b363 2238send_bitmap_rle_or_plain(struct drbd_conf *mdev,
f70af118 2239 struct p_header80 *h, struct bm_xfer_ctx *c)
b411b363
PR
2240{
2241 struct p_compressed_bm *p = (void*)h;
2242 unsigned long num_words;
2243 int len;
2244 int ok;
2245
2246 len = fill_bitmap_rle_bits(mdev, p, c);
2247
2248 if (len < 0)
f70af118 2249 return -EIO;
b411b363
PR
2250
2251 if (len) {
2252 DCBP_set_code(p, RLE_VLI_Bits);
2253 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2254 sizeof(*p) + len, 0);
2255
2256 c->packets[0]++;
2257 c->bytes[0] += sizeof(*p) + len;
2258
2259 if (c->bit_offset >= c->bm_bits)
2260 len = 0; /* DONE */
2261 } else {
2262 /* was not compressible.
2263 * send a buffer full of plain text bits instead. */
2264 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2265 len = num_words * sizeof(long);
2266 if (len)
2267 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2268 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
0b70a13d 2269 h, sizeof(struct p_header80) + len, 0);
b411b363
PR
2270 c->word_offset += num_words;
2271 c->bit_offset = c->word_offset * BITS_PER_LONG;
2272
2273 c->packets[1]++;
0b70a13d 2274 c->bytes[1] += sizeof(struct p_header80) + len;
b411b363
PR
2275
2276 if (c->bit_offset > c->bm_bits)
2277 c->bit_offset = c->bm_bits;
2278 }
f70af118
AG
2279 if (ok) {
2280 if (len == 0) {
2281 INFO_bm_xfer_stats(mdev, "send", c);
2282 return 0;
2283 } else
2284 return 1;
2285 }
2286 return -EIO;
b411b363
PR
2287}
2288
2289/* See the comment at receive_bitmap() */
2290int _drbd_send_bitmap(struct drbd_conf *mdev)
2291{
2292 struct bm_xfer_ctx c;
0b70a13d 2293 struct p_header80 *p;
f70af118 2294 int err;
b411b363 2295
81e84650 2296 ERR_IF(!mdev->bitmap) return false;
b411b363
PR
2297
2298 /* maybe we should use some per thread scratch page,
2299 * and allocate that during initial device creation? */
0b70a13d 2300 p = (struct p_header80 *) __get_free_page(GFP_NOIO);
b411b363
PR
2301 if (!p) {
2302 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
81e84650 2303 return false;
b411b363
PR
2304 }
2305
2306 if (get_ldev(mdev)) {
2307 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2308 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2309 drbd_bm_set_all(mdev);
2310 if (drbd_bm_write(mdev)) {
2311 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
2312 * but otherwise process as per normal - need to tell other
2313 * side that a full resync is required! */
2314 dev_err(DEV, "Failed to write bitmap to disk!\n");
2315 } else {
2316 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2317 drbd_md_sync(mdev);
2318 }
2319 }
2320 put_ldev(mdev);
2321 }
2322
2323 c = (struct bm_xfer_ctx) {
2324 .bm_bits = drbd_bm_bits(mdev),
2325 .bm_words = drbd_bm_words(mdev),
2326 };
2327
2328 do {
f70af118
AG
2329 err = send_bitmap_rle_or_plain(mdev, p, &c);
2330 } while (err > 0);
b411b363
PR
2331
2332 free_page((unsigned long) p);
f70af118 2333 return err == 0;
b411b363
PR
2334}
2335
2336int drbd_send_bitmap(struct drbd_conf *mdev)
2337{
2338 int err;
2339
2340 if (!drbd_get_data_sock(mdev))
2341 return -1;
2342 err = !_drbd_send_bitmap(mdev);
2343 drbd_put_data_sock(mdev);
2344 return err;
2345}
2346
2347int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2348{
2349 int ok;
2350 struct p_barrier_ack p;
2351
2352 p.barrier = barrier_nr;
2353 p.set_size = cpu_to_be32(set_size);
2354
2355 if (mdev->state.conn < C_CONNECTED)
81e84650 2356 return false;
b411b363 2357 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
0b70a13d 2358 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2359 return ok;
2360}
2361
2362/**
2363 * _drbd_send_ack() - Sends an ack packet
2364 * @mdev: DRBD device.
2365 * @cmd: Packet command code.
2366 * @sector: sector, needs to be in big endian byte order
2367 * @blksize: size in byte, needs to be in big endian byte order
2368 * @block_id: Id, big endian byte order
2369 */
2370static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2371 u64 sector,
2372 u32 blksize,
2373 u64 block_id)
2374{
2375 int ok;
2376 struct p_block_ack p;
2377
2378 p.sector = sector;
2379 p.block_id = block_id;
2380 p.blksize = blksize;
2381 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2382
2383 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
81e84650 2384 return false;
b411b363 2385 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
0b70a13d 2386 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2387 return ok;
2388}
2389
2b2bf214
LE
2390/* dp->sector and dp->block_id already/still in network byte order,
2391 * data_size is payload size according to dp->head,
2392 * and may need to be corrected for digest size. */
b411b363 2393int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2b2bf214 2394 struct p_data *dp, int data_size)
b411b363 2395{
2b2bf214
LE
2396 data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2397 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
b411b363
PR
2398 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2399 dp->block_id);
2400}
2401
2402int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2403 struct p_block_req *rp)
2404{
2405 return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2406}
2407
2408/**
2409 * drbd_send_ack() - Sends an ack packet
2410 * @mdev: DRBD device.
2411 * @cmd: Packet command code.
2412 * @e: Epoch entry.
2413 */
2414int drbd_send_ack(struct drbd_conf *mdev,
2415 enum drbd_packets cmd, struct drbd_epoch_entry *e)
2416{
2417 return _drbd_send_ack(mdev, cmd,
2418 cpu_to_be64(e->sector),
2419 cpu_to_be32(e->size),
2420 e->block_id);
2421}
2422
2423/* This function misuses the block_id field to signal if the blocks
2424	 * are in sync or not. */
2425int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2426 sector_t sector, int blksize, u64 block_id)
2427{
2428 return _drbd_send_ack(mdev, cmd,
2429 cpu_to_be64(sector),
2430 cpu_to_be32(blksize),
2431 cpu_to_be64(block_id));
2432}
2433
2434int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2435 sector_t sector, int size, u64 block_id)
2436{
2437 int ok;
2438 struct p_block_req p;
2439
2440 p.sector = cpu_to_be64(sector);
2441 p.block_id = block_id;
2442 p.blksize = cpu_to_be32(size);
2443
2444 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
0b70a13d 2445 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2446 return ok;
2447}
2448
2449int drbd_send_drequest_csum(struct drbd_conf *mdev,
2450 sector_t sector, int size,
2451 void *digest, int digest_size,
2452 enum drbd_packets cmd)
2453{
2454 int ok;
2455 struct p_block_req p;
2456
2457 p.sector = cpu_to_be64(sector);
2458 p.block_id = BE_DRBD_MAGIC + 0xbeef;
2459 p.blksize = cpu_to_be32(size);
2460
2461 p.head.magic = BE_DRBD_MAGIC;
2462 p.head.command = cpu_to_be16(cmd);
0b70a13d 2463 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
b411b363
PR
2464
2465 mutex_lock(&mdev->data.mutex);
2466
2467 ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2468 ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2469
2470 mutex_unlock(&mdev->data.mutex);
2471
2472 return ok;
2473}
2474
2475int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2476{
2477 int ok;
2478 struct p_block_req p;
2479
2480 p.sector = cpu_to_be64(sector);
2481 p.block_id = BE_DRBD_MAGIC + 0xbabe;
2482 p.blksize = cpu_to_be32(size);
2483
2484 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
0b70a13d 2485 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2486 return ok;
2487}
2488
2489/* called on sndtimeo
81e84650
AG
2490 * returns false if we should retry,
2491	 * true if we think the connection is dead
b411b363
PR
2492 */
2493static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2494{
2495 int drop_it;
2496 /* long elapsed = (long)(jiffies - mdev->last_received); */
2497
2498 drop_it = mdev->meta.socket == sock
2499 || !mdev->asender.task
2500 || get_t_state(&mdev->asender) != Running
2501 || mdev->state.conn < C_CONNECTED;
2502
2503 if (drop_it)
81e84650 2504 return true;
b411b363
PR
2505
2506 drop_it = !--mdev->ko_count;
2507 if (!drop_it) {
2508 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2509 current->comm, current->pid, mdev->ko_count);
2510 request_ping(mdev);
2511 }
2512
2513 return drop_it; /* && (mdev->state == R_PRIMARY) */;
2514}
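/* Hedged note, restating the logic above rather than adding behaviour:
 * if the timeout happened on the meta socket, or the asender is gone, or we
 * are no longer connected, we give up right away.  Otherwise each sndtimeo
 * expiry decrements ko_count (which drbd_send() reloads from
 * net_conf->ko_count before each data-socket send) and requests a ping;
 * only when it reaches zero do we declare the connection dead.  E.g. with
 * ko-count = 7, roughly seven consecutive send timeouts must pass before
 * the connection is dropped.
 */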
2515
2516/* The idea of sendpage seems to be to put some kind of reference
2517 * to the page into the skb, and to hand it over to the NIC. In
2518 * this process get_page() gets called.
2519 *
2520	 * As soon as the page has really been sent over the network, put_page()
2521 * gets called by some part of the network layer. [ NIC driver? ]
2522 *
2523 * [ get_page() / put_page() increment/decrement the count. If count
2524 * reaches 0 the page will be freed. ]
2525 *
2526 * This works nicely with pages from FSs.
2527 * But this means that in protocol A we might signal IO completion too early!
2528 *
2529 * In order not to corrupt data during a resync we must make sure
2530	 * that we do not reuse our own buffer pages (EEs) too early; therefore
2531 * we have the net_ee list.
2532 *
2533	 * XFS still seems to have problems: it submits pages with page_count == 0!
2534 * As a workaround, we disable sendpage on pages
2535 * with page_count == 0 or PageSlab.
2536 */
2537static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
ba11ad9a 2538 int offset, size_t size, unsigned msg_flags)
b411b363 2539{
ba11ad9a 2540 int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
b411b363
PR
2541 kunmap(page);
2542 if (sent == size)
2543 mdev->send_cnt += size>>9;
2544 return sent == size;
2545}
2546
2547static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
ba11ad9a 2548 int offset, size_t size, unsigned msg_flags)
b411b363
PR
2549{
2550 mm_segment_t oldfs = get_fs();
2551 int sent, ok;
2552 int len = size;
2553
2554 /* e.g. XFS meta- & log-data is in slab pages, which have a
2555 * page_count of 0 and/or have PageSlab() set.
2556 * we cannot use send_page for those, as that does get_page();
2557 * put_page(); and would cause either a VM_BUG directly, or
2558 * __page_cache_release a page that would actually still be referenced
2559 * by someone, leading to some obscure delayed Oops somewhere else. */
2560 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
ba11ad9a 2561 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
b411b363 2562
ba11ad9a 2563 msg_flags |= MSG_NOSIGNAL;
b411b363
PR
2564 drbd_update_congested(mdev);
2565 set_fs(KERNEL_DS);
2566 do {
2567 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2568 offset, len,
ba11ad9a 2569 msg_flags);
b411b363
PR
2570 if (sent == -EAGAIN) {
2571 if (we_should_drop_the_connection(mdev,
2572 mdev->data.socket))
2573 break;
2574 else
2575 continue;
2576 }
2577 if (sent <= 0) {
2578 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2579 __func__, (int)size, len, sent);
2580 break;
2581 }
2582 len -= sent;
2583 offset += sent;
2584 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2585 set_fs(oldfs);
2586 clear_bit(NET_CONGESTED, &mdev->flags);
2587
2588 ok = (len == 0);
2589 if (likely(ok))
2590 mdev->send_cnt += size>>9;
2591 return ok;
2592}
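/* Illustrative sketch only -- not driver code.  The sendpage-safety check at
 * the top of _drbd_send_page() can be read as a predicate like the
 * hypothetical helper below; pages failing it are copied via
 * _drbd_no_send_page() instead of being handed to ->sendpage():
 *
 *	static bool sendpage_ok_sketch(struct page *page)
 *	{
 *		return !disable_sendpage && page_count(page) >= 1 && !PageSlab(page);
 *	}
 */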
2593
2594static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2595{
2596 struct bio_vec *bvec;
2597 int i;
ba11ad9a 2598 /* hint all but last page with MSG_MORE */
b411b363
PR
2599 __bio_for_each_segment(bvec, bio, i, 0) {
2600 if (!_drbd_no_send_page(mdev, bvec->bv_page,
ba11ad9a
LE
2601 bvec->bv_offset, bvec->bv_len,
2602 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
b411b363
PR
2603 return 0;
2604 }
2605 return 1;
2606}
2607
2608static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2609{
2610 struct bio_vec *bvec;
2611 int i;
ba11ad9a 2612 /* hint all but last page with MSG_MORE */
b411b363
PR
2613 __bio_for_each_segment(bvec, bio, i, 0) {
2614 if (!_drbd_send_page(mdev, bvec->bv_page,
ba11ad9a
LE
2615 bvec->bv_offset, bvec->bv_len,
2616 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
b411b363
PR
2617 return 0;
2618 }
b411b363
PR
2619 return 1;
2620}
2621
45bb912b
LE
2622static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2623{
2624 struct page *page = e->pages;
2625 unsigned len = e->size;
ba11ad9a 2626 /* hint all but last page with MSG_MORE */
45bb912b
LE
2627 page_chain_for_each(page) {
2628 unsigned l = min_t(unsigned, len, PAGE_SIZE);
ba11ad9a
LE
2629 if (!_drbd_send_page(mdev, page, 0, l,
2630 page_chain_next(page) ? MSG_MORE : 0))
45bb912b
LE
2631 return 0;
2632 len -= l;
2633 }
2634 return 1;
2635}
2636
76d2e7ec
PR
2637static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2638{
2639 if (mdev->agreed_pro_version >= 95)
2640 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
76d2e7ec
PR
2641 (bi_rw & REQ_FUA ? DP_FUA : 0) |
2642 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2643 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2644 else
721a9602 2645 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
76d2e7ec
PR
2646}
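/* Example (illustrative): for a bio submitted with REQ_SYNC | REQ_FUA, a peer
 * speaking protocol version >= 95 receives DP_RW_SYNC | DP_FUA on the wire,
 * while an older peer only sees DP_RW_SYNC -- the FUA, FLUSH and DISCARD
 * hints are silently dropped for pre-95 peers.
 */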
2647
b411b363
PR
2648/* Used to send write requests
2649 * R_PRIMARY -> Peer (P_DATA)
2650 */
2651int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2652{
2653 int ok = 1;
2654 struct p_data p;
2655 unsigned int dp_flags = 0;
2656 void *dgb;
2657 int dgs;
2658
2659 if (!drbd_get_data_sock(mdev))
2660 return 0;
2661
2662 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2663 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2664
d5373389 2665 if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
0b70a13d
PR
2666 p.head.h80.magic = BE_DRBD_MAGIC;
2667 p.head.h80.command = cpu_to_be16(P_DATA);
2668 p.head.h80.length =
2669 cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2670 } else {
2671 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2672 p.head.h95.command = cpu_to_be16(P_DATA);
2673 p.head.h95.length =
2674 cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2675 }
b411b363
PR
2676
2677 p.sector = cpu_to_be64(req->sector);
2678 p.block_id = (unsigned long)req;
2679 p.seq_num = cpu_to_be32(req->seq_num =
2680 atomic_add_return(1, &mdev->packet_seq));
b411b363 2681
76d2e7ec
PR
2682 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2683
b411b363
PR
2684 if (mdev->state.conn >= C_SYNC_SOURCE &&
2685 mdev->state.conn <= C_PAUSED_SYNC_T)
2686 dp_flags |= DP_MAY_SET_IN_SYNC;
2687
2688 p.dp_flags = cpu_to_be32(dp_flags);
b411b363
PR
2689 set_bit(UNPLUG_REMOTE, &mdev->flags);
2690 ok = (sizeof(p) ==
ba11ad9a 2691 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
b411b363
PR
2692 if (ok && dgs) {
2693 dgb = mdev->int_dig_out;
45bb912b 2694 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
cab2f74b 2695 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
b411b363
PR
2696 }
2697 if (ok) {
470be44a
LE
2698 /* For protocol A, we have to memcpy the payload into
2699 * socket buffers, as we may complete right away
2700 * as soon as we handed it over to tcp, at which point the data
2701 * pages may become invalid.
2702 *
2703 * For data-integrity enabled, we copy it as well, so we can be
2704 * sure that even if the bio pages may still be modified, it
2705 * won't change the data on the wire, thus if the digest checks
2706 * out ok after sending on this side, but does not fit on the
2707 * receiving side, we sure have detected corruption elsewhere.
2708 */
2709 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
b411b363
PR
2710 ok = _drbd_send_bio(mdev, req->master_bio);
2711 else
2712 ok = _drbd_send_zc_bio(mdev, req->master_bio);
470be44a
LE
2713
2714 /* double check digest, sometimes buffers have been modified in flight. */
2715 if (dgs > 0 && dgs <= 64) {
2716			 * 64 bytes, 512 bits, is the largest digest size
2717 * currently supported in kernel crypto. */
2718 unsigned char digest[64];
2719 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2720 if (memcmp(mdev->int_dig_out, digest, dgs)) {
2721 dev_warn(DEV,
2722 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
2723 (unsigned long long)req->sector, req->size);
2724 }
2725 } /* else if (dgs > 64) {
2726 ... Be noisy about digest too large ...
2727 } */
b411b363
PR
2728 }
2729
2730 drbd_put_data_sock(mdev);
bd26bfc5 2731
b411b363
PR
2732 return ok;
2733}
2734
2735/* answer packet, used to send data back for read requests:
2736 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
2737 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
2738 */
2739int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2740 struct drbd_epoch_entry *e)
2741{
2742 int ok;
2743 struct p_data p;
2744 void *dgb;
2745 int dgs;
2746
2747 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2748 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2749
d5373389 2750 if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
0b70a13d
PR
2751 p.head.h80.magic = BE_DRBD_MAGIC;
2752 p.head.h80.command = cpu_to_be16(cmd);
2753 p.head.h80.length =
2754 cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2755 } else {
2756 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2757 p.head.h95.command = cpu_to_be16(cmd);
2758 p.head.h95.length =
2759 cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2760 }
b411b363
PR
2761
2762 p.sector = cpu_to_be64(e->sector);
2763 p.block_id = e->block_id;
2764 /* p.seq_num = 0; No sequence numbers here.. */
2765
2766 /* Only called by our kernel thread.
2767 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2768 * in response to admin command or module unload.
2769 */
2770 if (!drbd_get_data_sock(mdev))
2771 return 0;
2772
0b70a13d 2773 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
b411b363
PR
2774 if (ok && dgs) {
2775 dgb = mdev->int_dig_out;
45bb912b 2776 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
cab2f74b 2777 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
b411b363
PR
2778 }
2779 if (ok)
45bb912b 2780 ok = _drbd_send_zc_ee(mdev, e);
b411b363
PR
2781
2782 drbd_put_data_sock(mdev);
bd26bfc5 2783
b411b363
PR
2784 return ok;
2785}
2786
73a01a18
PR
2787int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2788{
2789 struct p_block_desc p;
2790
2791 p.sector = cpu_to_be64(req->sector);
2792 p.blksize = cpu_to_be32(req->size);
2793
2794 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2795}
2796
b411b363
PR
2797/*
2798 drbd_send distinguishes two cases:
2799
2800 Packets sent via the data socket "sock"
2801 and packets sent via the meta data socket "msock"
2802
2803 sock msock
2804 -----------------+-------------------------+------------------------------
2805 timeout conf.timeout / 2 conf.timeout / 2
2806 timeout action send a ping via msock Abort communication
2807 and close all sockets
2808*/
2809
2810/*
2811 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2812 */
2813int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2814 void *buf, size_t size, unsigned msg_flags)
2815{
2816 struct kvec iov;
2817 struct msghdr msg;
2818 int rv, sent = 0;
2819
2820 if (!sock)
2821 return -1000;
2822
2823 /* THINK if (signal_pending) return ... ? */
2824
2825 iov.iov_base = buf;
2826 iov.iov_len = size;
2827
2828 msg.msg_name = NULL;
2829 msg.msg_namelen = 0;
2830 msg.msg_control = NULL;
2831 msg.msg_controllen = 0;
2832 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
2833
2834 if (sock == mdev->data.socket) {
2835 mdev->ko_count = mdev->net_conf->ko_count;
2836 drbd_update_congested(mdev);
2837 }
2838 do {
2839 /* STRANGE
2840 * tcp_sendmsg does _not_ use its size parameter at all ?
2841 *
2842 * -EAGAIN on timeout, -EINTR on signal.
2843 */
2844/* THINK
2845 * do we need to block DRBD_SIG if sock == &meta.socket ??
2846 * otherwise wake_asender() might interrupt some send_*Ack !
2847 */
2848 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2849 if (rv == -EAGAIN) {
2850 if (we_should_drop_the_connection(mdev, sock))
2851 break;
2852 else
2853 continue;
2854 }
2855 D_ASSERT(rv != 0);
2856 if (rv == -EINTR) {
2857 flush_signals(current);
2858 rv = 0;
2859 }
2860 if (rv < 0)
2861 break;
2862 sent += rv;
2863 iov.iov_base += rv;
2864 iov.iov_len -= rv;
2865 } while (sent < size);
2866
2867 if (sock == mdev->data.socket)
2868 clear_bit(NET_CONGESTED, &mdev->flags);
2869
2870 if (rv <= 0) {
2871 if (rv != -EAGAIN) {
2872 dev_err(DEV, "%s_sendmsg returned %d\n",
2873 sock == mdev->meta.socket ? "msock" : "sock",
2874 rv);
2875 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2876 } else
2877 drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2878 }
2879
2880 return sent;
2881}
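/* Usage note (hedged, summarizing the callers above rather than adding new
 * behaviour): drbd_send() returns the number of bytes actually sent, or a
 * negative value on early failure, so callers treat anything short of the
 * requested size as an error, e.g.
 *
 *	ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
 *
 * On repeated -EAGAIN the ko-count logic decides when to give up, at which
 * point the connection is forced into C_TIMEOUT (or C_BROKEN_PIPE for other
 * send errors).
 */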
2882
2883static int drbd_open(struct block_device *bdev, fmode_t mode)
2884{
2885 struct drbd_conf *mdev = bdev->bd_disk->private_data;
2886 unsigned long flags;
2887 int rv = 0;
2888
2a48fc0a 2889 mutex_lock(&drbd_main_mutex);
b411b363
PR
2890 spin_lock_irqsave(&mdev->req_lock, flags);
2891 /* to have a stable mdev->state.role
2892 * and no race with updating open_cnt */
2893
2894 if (mdev->state.role != R_PRIMARY) {
2895 if (mode & FMODE_WRITE)
2896 rv = -EROFS;
2897 else if (!allow_oos)
2898 rv = -EMEDIUMTYPE;
2899 }
2900
2901 if (!rv)
2902 mdev->open_cnt++;
2903 spin_unlock_irqrestore(&mdev->req_lock, flags);
2a48fc0a 2904 mutex_unlock(&drbd_main_mutex);
b411b363
PR
2905
2906 return rv;
2907}
2908
2909static int drbd_release(struct gendisk *gd, fmode_t mode)
2910{
2911 struct drbd_conf *mdev = gd->private_data;
2a48fc0a 2912 mutex_lock(&drbd_main_mutex);
b411b363 2913 mdev->open_cnt--;
2a48fc0a 2914 mutex_unlock(&drbd_main_mutex);
b411b363
PR
2915 return 0;
2916}
2917
b411b363
PR
2918static void drbd_set_defaults(struct drbd_conf *mdev)
2919{
85f4cc17
PR
2920 /* This way we get a compile error when sync_conf grows,
2921	 and we forget to initialize it here */
2922 mdev->sync_conf = (struct syncer_conf) {
2923 /* .rate = */ DRBD_RATE_DEF,
2924 /* .after = */ DRBD_AFTER_DEF,
2925 /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
85f4cc17
PR
2926 /* .verify_alg = */ {}, 0,
2927 /* .cpu_mask = */ {}, 0,
2928 /* .csums_alg = */ {}, 0,
e756414f 2929 /* .use_rle = */ 0,
9a31d716
PR
2930 /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
2931 /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
2932 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
2933 /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
0f0601f4
LE
2934 /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
2935 /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
85f4cc17
PR
2936 };
2937
2938	/* Have to do it this way, because the layout differs between
2939	 big-endian and little-endian */
b411b363
PR
2940 mdev->state = (union drbd_state) {
2941 { .role = R_SECONDARY,
2942 .peer = R_UNKNOWN,
2943 .conn = C_STANDALONE,
2944 .disk = D_DISKLESS,
2945 .pdsk = D_UNKNOWN,
fb22c402
PR
2946 .susp = 0,
2947 .susp_nod = 0,
2948 .susp_fen = 0
b411b363
PR
2949 } };
2950}
2951
2952void drbd_init_set_defaults(struct drbd_conf *mdev)
2953{
2954 /* the memset(,0,) did most of this.
2955 * note: only assignments, no allocation in here */
2956
2957 drbd_set_defaults(mdev);
2958
b411b363
PR
2959 atomic_set(&mdev->ap_bio_cnt, 0);
2960 atomic_set(&mdev->ap_pending_cnt, 0);
2961 atomic_set(&mdev->rs_pending_cnt, 0);
2962 atomic_set(&mdev->unacked_cnt, 0);
2963 atomic_set(&mdev->local_cnt, 0);
2964 atomic_set(&mdev->net_cnt, 0);
2965 atomic_set(&mdev->packet_seq, 0);
2966 atomic_set(&mdev->pp_in_use, 0);
435f0740 2967 atomic_set(&mdev->pp_in_use_by_net, 0);
778f271d 2968 atomic_set(&mdev->rs_sect_in, 0);
0f0601f4 2969 atomic_set(&mdev->rs_sect_ev, 0);
759fbdfb 2970 atomic_set(&mdev->ap_in_flight, 0);
b411b363
PR
2971
2972 mutex_init(&mdev->md_io_mutex);
2973 mutex_init(&mdev->data.mutex);
2974 mutex_init(&mdev->meta.mutex);
2975 sema_init(&mdev->data.work.s, 0);
2976 sema_init(&mdev->meta.work.s, 0);
2977 mutex_init(&mdev->state_mutex);
2978
2979 spin_lock_init(&mdev->data.work.q_lock);
2980 spin_lock_init(&mdev->meta.work.q_lock);
2981
2982 spin_lock_init(&mdev->al_lock);
2983 spin_lock_init(&mdev->req_lock);
2984 spin_lock_init(&mdev->peer_seq_lock);
2985 spin_lock_init(&mdev->epoch_lock);
2986
2987 INIT_LIST_HEAD(&mdev->active_ee);
2988 INIT_LIST_HEAD(&mdev->sync_ee);
2989 INIT_LIST_HEAD(&mdev->done_ee);
2990 INIT_LIST_HEAD(&mdev->read_ee);
2991 INIT_LIST_HEAD(&mdev->net_ee);
2992 INIT_LIST_HEAD(&mdev->resync_reads);
2993 INIT_LIST_HEAD(&mdev->data.work.q);
2994 INIT_LIST_HEAD(&mdev->meta.work.q);
2995 INIT_LIST_HEAD(&mdev->resync_work.list);
2996 INIT_LIST_HEAD(&mdev->unplug_work.list);
e9e6f3ec 2997 INIT_LIST_HEAD(&mdev->go_diskless.list);
b411b363 2998 INIT_LIST_HEAD(&mdev->md_sync_work.list);
c4752ef1 2999 INIT_LIST_HEAD(&mdev->start_resync_work.list);
b411b363 3000 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
0ced55a3 3001
794abb75 3002 mdev->resync_work.cb = w_resync_timer;
b411b363 3003 mdev->unplug_work.cb = w_send_write_hint;
e9e6f3ec 3004 mdev->go_diskless.cb = w_go_diskless;
b411b363
PR
3005 mdev->md_sync_work.cb = w_md_sync;
3006 mdev->bm_io_work.w.cb = w_bitmap_io;
370a43e7 3007 mdev->start_resync_work.cb = w_start_resync;
b411b363
PR
3008 init_timer(&mdev->resync_timer);
3009 init_timer(&mdev->md_sync_timer);
370a43e7 3010 init_timer(&mdev->start_resync_timer);
b411b363
PR
3011 mdev->resync_timer.function = resync_timer_fn;
3012 mdev->resync_timer.data = (unsigned long) mdev;
3013 mdev->md_sync_timer.function = md_sync_timer_fn;
3014 mdev->md_sync_timer.data = (unsigned long) mdev;
370a43e7
PR
3015 mdev->start_resync_timer.function = start_resync_timer_fn;
3016 mdev->start_resync_timer.data = (unsigned long) mdev;
b411b363
PR
3017
3018 init_waitqueue_head(&mdev->misc_wait);
3019 init_waitqueue_head(&mdev->state_wait);
84dfb9f5 3020 init_waitqueue_head(&mdev->net_cnt_wait);
b411b363
PR
3021 init_waitqueue_head(&mdev->ee_wait);
3022 init_waitqueue_head(&mdev->al_wait);
3023 init_waitqueue_head(&mdev->seq_wait);
3024
3025 drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3026 drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3027 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3028
3029 mdev->agreed_pro_version = PRO_VERSION_MAX;
2451fc3b 3030 mdev->write_ordering = WO_bdev_flush;
b411b363
PR
3031 mdev->resync_wenr = LC_FREE;
3032}
3033
3034void drbd_mdev_cleanup(struct drbd_conf *mdev)
3035{
1d7734a0 3036 int i;
b411b363
PR
3037 if (mdev->receiver.t_state != None)
3038 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3039 mdev->receiver.t_state);
3040
3041 /* no need to lock it, I'm the only thread alive */
3042 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
3043 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3044 mdev->al_writ_cnt =
3045 mdev->bm_writ_cnt =
3046 mdev->read_cnt =
3047 mdev->recv_cnt =
3048 mdev->send_cnt =
3049 mdev->writ_cnt =
3050 mdev->p_size =
3051 mdev->rs_start =
3052 mdev->rs_total =
1d7734a0
LE
3053 mdev->rs_failed = 0;
3054 mdev->rs_last_events = 0;
0f0601f4 3055 mdev->rs_last_sect_ev = 0;
1d7734a0
LE
3056 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3057 mdev->rs_mark_left[i] = 0;
3058 mdev->rs_mark_time[i] = 0;
3059 }
b411b363
PR
3060 D_ASSERT(mdev->net_conf == NULL);
3061
3062 drbd_set_my_capacity(mdev, 0);
3063 if (mdev->bitmap) {
3064 /* maybe never allocated. */
02d9a94b 3065 drbd_bm_resize(mdev, 0, 1);
b411b363
PR
3066 drbd_bm_cleanup(mdev);
3067 }
3068
3069 drbd_free_resources(mdev);
0778286a 3070 clear_bit(AL_SUSPENDED, &mdev->flags);
b411b363
PR
3071
3072 /*
3073	 * currently we call drbd_init_ee only on module load, so
3074	 * we may call drbd_release_ee only on module unload!
3075 */
3076 D_ASSERT(list_empty(&mdev->active_ee));
3077 D_ASSERT(list_empty(&mdev->sync_ee));
3078 D_ASSERT(list_empty(&mdev->done_ee));
3079 D_ASSERT(list_empty(&mdev->read_ee));
3080 D_ASSERT(list_empty(&mdev->net_ee));
3081 D_ASSERT(list_empty(&mdev->resync_reads));
3082 D_ASSERT(list_empty(&mdev->data.work.q));
3083 D_ASSERT(list_empty(&mdev->meta.work.q));
3084 D_ASSERT(list_empty(&mdev->resync_work.list));
3085 D_ASSERT(list_empty(&mdev->unplug_work.list));
e9e6f3ec 3086 D_ASSERT(list_empty(&mdev->go_diskless.list));
2265b473
LE
3087
3088 drbd_set_defaults(mdev);
b411b363
PR
3089}
3090
3091
3092static void drbd_destroy_mempools(void)
3093{
3094 struct page *page;
3095
3096 while (drbd_pp_pool) {
3097 page = drbd_pp_pool;
3098 drbd_pp_pool = (struct page *)page_private(page);
3099 __free_page(page);
3100 drbd_pp_vacant--;
3101 }
3102
3103 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3104
3105 if (drbd_ee_mempool)
3106 mempool_destroy(drbd_ee_mempool);
3107 if (drbd_request_mempool)
3108 mempool_destroy(drbd_request_mempool);
3109 if (drbd_ee_cache)
3110 kmem_cache_destroy(drbd_ee_cache);
3111 if (drbd_request_cache)
3112 kmem_cache_destroy(drbd_request_cache);
3113 if (drbd_bm_ext_cache)
3114 kmem_cache_destroy(drbd_bm_ext_cache);
3115 if (drbd_al_ext_cache)
3116 kmem_cache_destroy(drbd_al_ext_cache);
3117
3118 drbd_ee_mempool = NULL;
3119 drbd_request_mempool = NULL;
3120 drbd_ee_cache = NULL;
3121 drbd_request_cache = NULL;
3122 drbd_bm_ext_cache = NULL;
3123 drbd_al_ext_cache = NULL;
3124
3125 return;
3126}
3127
3128static int drbd_create_mempools(void)
3129{
3130 struct page *page;
1816a2b4 3131 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
b411b363
PR
3132 int i;
3133
3134 /* prepare our caches and mempools */
3135 drbd_request_mempool = NULL;
3136 drbd_ee_cache = NULL;
3137 drbd_request_cache = NULL;
3138 drbd_bm_ext_cache = NULL;
3139 drbd_al_ext_cache = NULL;
3140 drbd_pp_pool = NULL;
3141
3142 /* caches */
3143 drbd_request_cache = kmem_cache_create(
3144 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3145 if (drbd_request_cache == NULL)
3146 goto Enomem;
3147
3148 drbd_ee_cache = kmem_cache_create(
3149 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3150 if (drbd_ee_cache == NULL)
3151 goto Enomem;
3152
3153 drbd_bm_ext_cache = kmem_cache_create(
3154 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3155 if (drbd_bm_ext_cache == NULL)
3156 goto Enomem;
3157
3158 drbd_al_ext_cache = kmem_cache_create(
3159 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3160 if (drbd_al_ext_cache == NULL)
3161 goto Enomem;
3162
3163 /* mempools */
3164 drbd_request_mempool = mempool_create(number,
3165 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3166 if (drbd_request_mempool == NULL)
3167 goto Enomem;
3168
3169 drbd_ee_mempool = mempool_create(number,
3170 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
2027ae1f 3171 if (drbd_ee_mempool == NULL)
b411b363
PR
3172 goto Enomem;
3173
3174 /* drbd's page pool */
3175 spin_lock_init(&drbd_pp_lock);
3176
3177 for (i = 0; i < number; i++) {
3178 page = alloc_page(GFP_HIGHUSER);
3179 if (!page)
3180 goto Enomem;
3181 set_page_private(page, (unsigned long)drbd_pp_pool);
3182 drbd_pp_pool = page;
3183 }
3184 drbd_pp_vacant = number;
3185
3186 return 0;
3187
3188Enomem:
3189 drbd_destroy_mempools(); /* in case we allocated some */
3190 return -ENOMEM;
3191}
3192
3193static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3194 void *unused)
3195{
3196 /* just so we have it. you never know what interesting things we
3197 * might want to do here some day...
3198 */
3199
3200 return NOTIFY_DONE;
3201}
3202
3203static struct notifier_block drbd_notifier = {
3204 .notifier_call = drbd_notify_sys,
3205};
3206
3207static void drbd_release_ee_lists(struct drbd_conf *mdev)
3208{
3209 int rr;
3210
3211 rr = drbd_release_ee(mdev, &mdev->active_ee);
3212 if (rr)
3213 dev_err(DEV, "%d EEs in active list found!\n", rr);
3214
3215 rr = drbd_release_ee(mdev, &mdev->sync_ee);
3216 if (rr)
3217 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3218
3219 rr = drbd_release_ee(mdev, &mdev->read_ee);
3220 if (rr)
3221 dev_err(DEV, "%d EEs in read list found!\n", rr);
3222
3223 rr = drbd_release_ee(mdev, &mdev->done_ee);
3224 if (rr)
3225 dev_err(DEV, "%d EEs in done list found!\n", rr);
3226
3227 rr = drbd_release_ee(mdev, &mdev->net_ee);
3228 if (rr)
3229 dev_err(DEV, "%d EEs in net list found!\n", rr);
3230}
3231
3232/* caution. no locking.
3233 * currently only used from module cleanup code. */
3234static void drbd_delete_device(unsigned int minor)
3235{
3236 struct drbd_conf *mdev = minor_to_mdev(minor);
3237
3238 if (!mdev)
3239 return;
3240
3241 /* paranoia asserts */
3242 if (mdev->open_cnt != 0)
3243 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3244 __FILE__ , __LINE__);
3245
3246 ERR_IF (!list_empty(&mdev->data.work.q)) {
3247 struct list_head *lp;
3248 list_for_each(lp, &mdev->data.work.q) {
3249 dev_err(DEV, "lp = %p\n", lp);
3250 }
3251 };
3252 /* end paranoia asserts */
3253
3254 del_gendisk(mdev->vdisk);
3255
3256 /* cleanup stuff that may have been allocated during
3257 * device (re-)configuration or state changes */
3258
3259 if (mdev->this_bdev)
3260 bdput(mdev->this_bdev);
3261
3262 drbd_free_resources(mdev);
3263
3264 drbd_release_ee_lists(mdev);
3265
3266 /* should be free'd on disconnect? */
3267 kfree(mdev->ee_hash);
3268 /*
3269 mdev->ee_hash_s = 0;
3270 mdev->ee_hash = NULL;
3271 */
3272
3273 lc_destroy(mdev->act_log);
3274 lc_destroy(mdev->resync);
3275
3276 kfree(mdev->p_uuid);
3277 /* mdev->p_uuid = NULL; */
3278
3279 kfree(mdev->int_dig_out);
3280 kfree(mdev->int_dig_in);
3281 kfree(mdev->int_dig_vv);
3282
3283 /* cleanup the rest that has been
3284 * allocated from drbd_new_device
3285 * and actually free the mdev itself */
3286 drbd_free_mdev(mdev);
3287}
3288
3289static void drbd_cleanup(void)
3290{
3291 unsigned int i;
3292
3293 unregister_reboot_notifier(&drbd_notifier);
3294
17a93f30
LE
3295 /* first remove proc,
3296	 * drbdsetup uses its presence to detect
3297 * whether DRBD is loaded.
3298	 * If we were to get stuck in proc removal,
3299 * but have netlink already deregistered,
3300 * some drbdsetup commands may wait forever
3301 * for an answer.
3302 */
3303 if (drbd_proc)
3304 remove_proc_entry("drbd", NULL);
3305
b411b363
PR
3306 drbd_nl_cleanup();
3307
3308 if (minor_table) {
b411b363
PR
3309 i = minor_count;
3310 while (i--)
3311 drbd_delete_device(i);
3312 drbd_destroy_mempools();
3313 }
3314
3315 kfree(minor_table);
3316
3317 unregister_blkdev(DRBD_MAJOR, "drbd");
3318
3319 printk(KERN_INFO "drbd: module cleanup done.\n");
3320}
3321
3322/**
3323 * drbd_congested() - Callback for pdflush
3324 * @congested_data: User data
3325 * @bdi_bits: Bits pdflush is currently interested in
3326 *
3327 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3328 */
3329static int drbd_congested(void *congested_data, int bdi_bits)
3330{
3331 struct drbd_conf *mdev = congested_data;
3332 struct request_queue *q;
3333 char reason = '-';
3334 int r = 0;
3335
1b881ef7 3336 if (!may_inc_ap_bio(mdev)) {
b411b363
PR
3337 /* DRBD has frozen IO */
3338 r = bdi_bits;
3339 reason = 'd';
3340 goto out;
3341 }
3342
3343 if (get_ldev(mdev)) {
3344 q = bdev_get_queue(mdev->ldev->backing_bdev);
3345 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3346 put_ldev(mdev);
3347 if (r)
3348 reason = 'b';
3349 }
3350
3351 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3352 r |= (1 << BDI_async_congested);
3353 reason = reason == 'b' ? 'a' : 'n';
3354 }
3355
3356out:
3357 mdev->congestion_reason = reason;
3358 return r;
3359}
3360
3361struct drbd_conf *drbd_new_device(unsigned int minor)
3362{
3363 struct drbd_conf *mdev;
3364 struct gendisk *disk;
3365 struct request_queue *q;
3366
3367 /* GFP_KERNEL, we are outside of all write-out paths */
3368 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3369 if (!mdev)
3370 return NULL;
3371 if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3372 goto out_no_cpumask;
3373
3374 mdev->minor = minor;
3375
3376 drbd_init_set_defaults(mdev);
3377
3378 q = blk_alloc_queue(GFP_KERNEL);
3379 if (!q)
3380 goto out_no_q;
3381 mdev->rq_queue = q;
3382 q->queuedata = mdev;
b411b363
PR
3383
3384 disk = alloc_disk(1);
3385 if (!disk)
3386 goto out_no_disk;
3387 mdev->vdisk = disk;
3388
81e84650 3389 set_disk_ro(disk, true);
b411b363
PR
3390
3391 disk->queue = q;
3392 disk->major = DRBD_MAJOR;
3393 disk->first_minor = minor;
3394 disk->fops = &drbd_ops;
3395 sprintf(disk->disk_name, "drbd%d", minor);
3396 disk->private_data = mdev;
3397
3398 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3399 /* we have no partitions. we contain only ourselves. */
3400 mdev->this_bdev->bd_contains = mdev->this_bdev;
3401
3402 q->backing_dev_info.congested_fn = drbd_congested;
3403 q->backing_dev_info.congested_data = mdev;
3404
2f58dcfc 3405 blk_queue_make_request(q, drbd_make_request);
1816a2b4 3406 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9);
b411b363
PR
3407 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3408 blk_queue_merge_bvec(q, drbd_merge_bvec);
7eaceacc 3409 q->queue_lock = &mdev->req_lock;
b411b363
PR
3410
3411 mdev->md_io_page = alloc_page(GFP_KERNEL);
3412 if (!mdev->md_io_page)
3413 goto out_no_io_page;
3414
3415 if (drbd_bm_init(mdev))
3416 goto out_no_bitmap;
3417 /* no need to lock access, we are still initializing this minor device. */
3418 if (!tl_init(mdev))
3419 goto out_no_tl;
3420
3421 mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3422 if (!mdev->app_reads_hash)
3423 goto out_no_app_reads;
3424
3425 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3426 if (!mdev->current_epoch)
3427 goto out_no_epoch;
3428
3429 INIT_LIST_HEAD(&mdev->current_epoch->list);
3430 mdev->epochs = 1;
3431
3432 return mdev;
3433
3434/* out_whatever_else:
3435 kfree(mdev->current_epoch); */
3436out_no_epoch:
3437 kfree(mdev->app_reads_hash);
3438out_no_app_reads:
3439 tl_cleanup(mdev);
3440out_no_tl:
3441 drbd_bm_cleanup(mdev);
3442out_no_bitmap:
3443 __free_page(mdev->md_io_page);
3444out_no_io_page:
3445 put_disk(disk);
3446out_no_disk:
3447 blk_cleanup_queue(q);
3448out_no_q:
3449 free_cpumask_var(mdev->cpu_mask);
3450out_no_cpumask:
3451 kfree(mdev);
3452 return NULL;
3453}
3454
3455/* counterpart of drbd_new_device.
3456 * last part of drbd_delete_device. */
3457void drbd_free_mdev(struct drbd_conf *mdev)
3458{
3459 kfree(mdev->current_epoch);
3460 kfree(mdev->app_reads_hash);
3461 tl_cleanup(mdev);
3462 if (mdev->bitmap) /* should no longer be there. */
3463 drbd_bm_cleanup(mdev);
3464 __free_page(mdev->md_io_page);
3465 put_disk(mdev->vdisk);
3466 blk_cleanup_queue(mdev->rq_queue);
3467 free_cpumask_var(mdev->cpu_mask);
3719094e 3468 drbd_free_tl_hash(mdev);
b411b363
PR
3469 kfree(mdev);
3470}
3471
3472
3473int __init drbd_init(void)
3474{
3475 int err;
3476
3477 if (sizeof(struct p_handshake) != 80) {
3478 printk(KERN_ERR
3479 "drbd: never change the size or layout "
3480 "of the HandShake packet.\n");
3481 return -EINVAL;
3482 }
3483
2b8a90b5 3484 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
b411b363
PR
3485 printk(KERN_ERR
3486 "drbd: invalid minor_count (%d)\n", minor_count);
3487#ifdef MODULE
3488 return -EINVAL;
3489#else
3490 minor_count = 8;
3491#endif
3492 }
3493
3494 err = drbd_nl_init();
3495 if (err)
3496 return err;
3497
3498 err = register_blkdev(DRBD_MAJOR, "drbd");
3499 if (err) {
3500 printk(KERN_ERR
3501 "drbd: unable to register block device major %d\n",
3502 DRBD_MAJOR);
3503 return err;
3504 }
3505
3506 register_reboot_notifier(&drbd_notifier);
3507
3508 /*
3509 * allocate all necessary structs
3510 */
3511 err = -ENOMEM;
3512
3513 init_waitqueue_head(&drbd_pp_wait);
3514
3515 drbd_proc = NULL; /* play safe for drbd_cleanup */
3516 minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3517 GFP_KERNEL);
3518 if (!minor_table)
3519 goto Enomem;
3520
3521 err = drbd_create_mempools();
3522 if (err)
3523 goto Enomem;
3524
8c484ee4 3525 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3526 if (!drbd_proc) {
3527 printk(KERN_ERR "drbd: unable to register proc file\n");
3528 goto Enomem;
3529 }
3530
3531 rwlock_init(&global_state_lock);
3532
3533 printk(KERN_INFO "drbd: initialized. "
3534 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3535 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3536 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3537 printk(KERN_INFO "drbd: registered as block device major %d\n",
3538 DRBD_MAJOR);
3539 printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3540
3541 return 0; /* Success! */
3542
3543Enomem:
3544 drbd_cleanup();
3545 if (err == -ENOMEM)
3546 /* currently always the case */
3547 printk(KERN_ERR "drbd: ran out of memory\n");
3548 else
3549 printk(KERN_ERR "drbd: initialization failure\n");
3550 return err;
3551}
3552
3553void drbd_free_bc(struct drbd_backing_dev *ldev)
3554{
3555 if (ldev == NULL)
3556 return;
3557
3558 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3559 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3560
3561 kfree(ldev);
3562}
3563
3564void drbd_free_sock(struct drbd_conf *mdev)
3565{
3566 if (mdev->data.socket) {
4589d7f8 3567 mutex_lock(&mdev->data.mutex);
3568 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3569 sock_release(mdev->data.socket);
3570 mdev->data.socket = NULL;
4589d7f8 3571 mutex_unlock(&mdev->data.mutex);
3572 }
3573 if (mdev->meta.socket) {
4589d7f8 3574 mutex_lock(&mdev->meta.mutex);
3575 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3576 sock_release(mdev->meta.socket);
3577 mdev->meta.socket = NULL;
4589d7f8 3578 mutex_unlock(&mdev->meta.mutex);
3579 }
3580}
3581
3582
3583void drbd_free_resources(struct drbd_conf *mdev)
3584{
3585 crypto_free_hash(mdev->csums_tfm);
3586 mdev->csums_tfm = NULL;
3587 crypto_free_hash(mdev->verify_tfm);
3588 mdev->verify_tfm = NULL;
3589 crypto_free_hash(mdev->cram_hmac_tfm);
3590 mdev->cram_hmac_tfm = NULL;
3591 crypto_free_hash(mdev->integrity_w_tfm);
3592 mdev->integrity_w_tfm = NULL;
3593 crypto_free_hash(mdev->integrity_r_tfm);
3594 mdev->integrity_r_tfm = NULL;
3595
3596 drbd_free_sock(mdev);
3597
3598 __no_warn(local,
3599 drbd_free_bc(mdev->ldev);
3600 mdev->ldev = NULL;);
3601}
3602
3603/* meta data management */
3604
3605struct meta_data_on_disk {
3606 u64 la_size; /* last agreed size. */
3607 u64 uuid[UI_SIZE]; /* UUIDs. */
3608 u64 device_uuid;
3609 u64 reserved_u64_1;
3610 u32 flags; /* MDF */
3611 u32 magic;
3612 u32 md_size_sect;
3613 u32 al_offset; /* offset to this block */
3614 u32 al_nr_extents; /* important for restoring the AL */
3615 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3616 u32 bm_offset; /* offset to the bitmap, from here */
3617 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
3618 u32 reserved_u32[4];
3619
3620} __packed;
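/* All multi-byte fields above are converted with cpu_to_be32()/cpu_to_be64()
 * before they hit the disk, i.e. the on-disk format is big-endian.  The struct
 * only covers the first bytes of the 512-byte meta-data block; drbd_md_sync()
 * below zero-fills the whole block before populating it. */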
3621
3622/**
3623 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3624 * @mdev: DRBD device.
3625 */
3626void drbd_md_sync(struct drbd_conf *mdev)
3627{
3628 struct meta_data_on_disk *buffer;
3629 sector_t sector;
3630 int i;
3631
3632 del_timer(&mdev->md_sync_timer);
3633 /* timer may be rearmed by drbd_md_mark_dirty() now. */
3634 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3635 return;
3636
 3637	/* We use D_FAILED here, and not D_ATTACHING, because we try to write
 3638	 * metadata even if we detach due to a disk failure! */
3639 if (!get_ldev_if_state(mdev, D_FAILED))
3640 return;
3641
3642 mutex_lock(&mdev->md_io_mutex);
3643 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3644 memset(buffer, 0, 512);
3645
3646 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3647 for (i = UI_CURRENT; i < UI_SIZE; i++)
3648 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3649 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3650 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3651
3652 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
3653 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
3654 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3655 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3656 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3657
3658 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3659
3660 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3661 sector = mdev->ldev->md.md_offset;
3662
3f3a9b84 3663 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
 3664		/* this was a try anyway ... */
3665 dev_err(DEV, "meta data update failed!\n");
81e84650 3666 drbd_chk_io_error(mdev, 1, true);
3667 }
3668
3669 /* Update mdev->ldev->md.la_size_sect,
3670 * since we updated it on metadata. */
3671 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3672
3673 mutex_unlock(&mdev->md_io_mutex);
3674 put_ldev(mdev);
3675}
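/* A minimal sketch of how callers combine drbd_md_set_flag() and
 * drbd_md_sync() (see drbd_bmio_set_n_write() further down in this file):
 *
 *	drbd_md_set_flag(mdev, MDF_FULL_SYNC);	// marks the super block dirty
 *	drbd_md_sync(mdev);			// write it out right now
 *
 * Callers that can tolerate a short delay only call drbd_md_mark_dirty()
 * and rely on md_sync_timer to trigger the writeout. */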
3676
3677/**
3678 * drbd_md_read() - Reads in the meta data super block
3679 * @mdev: DRBD device.
3680 * @bdev: Device from which the meta data should be read in.
3681 *
116676ca 3682 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
3683 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3684 */
3685int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3686{
3687 struct meta_data_on_disk *buffer;
3688 int i, rv = NO_ERROR;
3689
3690 if (!get_ldev_if_state(mdev, D_ATTACHING))
3691 return ERR_IO_MD_DISK;
3692
3693 mutex_lock(&mdev->md_io_mutex);
3694 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3695
3696 if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
 3697		/* NOTE: can't do normal error processing here as this is
 3698		   called BEFORE disk is attached */
3699 dev_err(DEV, "Error while reading metadata.\n");
3700 rv = ERR_IO_MD_DISK;
3701 goto err;
3702 }
3703
3704 if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3705 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3706 rv = ERR_MD_INVALID;
3707 goto err;
3708 }
3709 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3710 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3711 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3712 rv = ERR_MD_INVALID;
3713 goto err;
3714 }
3715 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3716 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3717 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3718 rv = ERR_MD_INVALID;
3719 goto err;
3720 }
3721 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3722 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3723 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3724 rv = ERR_MD_INVALID;
3725 goto err;
3726 }
3727
3728 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3729 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3730 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3731 rv = ERR_MD_INVALID;
3732 goto err;
3733 }
3734
3735 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3736 for (i = UI_CURRENT; i < UI_SIZE; i++)
3737 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3738 bdev->md.flags = be32_to_cpu(buffer->flags);
3739 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3740 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3741
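	/* Sanity-clamp a bogus on-disk value: fewer than 7 AL extents falls
	 * back to 127, which appears to be the driver's default. */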
3742 if (mdev->sync_conf.al_extents < 7)
3743 mdev->sync_conf.al_extents = 127;
3744
3745 err:
3746 mutex_unlock(&mdev->md_io_mutex);
3747 put_ldev(mdev);
3748
3749 return rv;
3750}
3751
3752static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index)
3753{
3754 static char *uuid_str[UI_EXTENDED_SIZE] = {
3755 [UI_CURRENT] = "CURRENT",
3756 [UI_BITMAP] = "BITMAP",
3757 [UI_HISTORY_START] = "HISTORY_START",
3758 [UI_HISTORY_END] = "HISTORY_END",
3759 [UI_SIZE] = "SIZE",
3760 [UI_FLAGS] = "FLAGS",
3761 };
3762
3763 if (index >= UI_EXTENDED_SIZE) {
3764 dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n");
3765 return;
3766 }
3767
3768 dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n",
3769 uuid_str[index],
3770 (unsigned long long)mdev->ldev->md.uuid[index]);
3771}
3772
3773
3774/**
3775 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3776 * @mdev: DRBD device.
3777 *
3778 * Call this function if you change anything that should be written to
 3779 * the meta-data super block. This function sets MD_DIRTY, and starts a
 3780 * timer that ensures drbd_md_sync() gets called within five seconds.
3781 */
ca0e6098 3782#ifdef DEBUG
3783void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3784{
3785 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3786 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3787 mdev->last_md_mark_dirty.line = line;
3788 mdev->last_md_mark_dirty.func = func;
3789 }
3790}
3791#else
3792void drbd_md_mark_dirty(struct drbd_conf *mdev)
3793{
ee15b038 3794 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
ca0e6098 3795 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
b411b363 3796}
ee15b038 3797#endif
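/* Note: the DEBUG variant above also records the call site (func/line, later
 * reported by w_md_sync()) and arms the timer for one second, instead of the
 * five seconds used by the non-debug variant. */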
3798
3799static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3800{
3801 int i;
3802
ac724121 3803 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
b411b363 3804 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3805 debug_drbd_uuid(mdev, i+1);
3806 }
3807}
3808
3809void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3810{
3811 if (idx == UI_CURRENT) {
3812 if (mdev->state.role == R_PRIMARY)
3813 val |= 1;
3814 else
3815 val &= ~((u64)1);
3816
3817 drbd_set_ed_uuid(mdev, val);
3818 }
3819
3820 mdev->ldev->md.uuid[idx] = val;
ac724121 3821 debug_drbd_uuid(mdev, idx);
3822 drbd_md_mark_dirty(mdev);
3823}
3824
3825
3826void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3827{
3828 if (mdev->ldev->md.uuid[idx]) {
3829 drbd_uuid_move_history(mdev);
3830 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
ac724121 3831 debug_drbd_uuid(mdev, UI_HISTORY_START);
3832 }
3833 _drbd_uuid_set(mdev, idx, val);
3834}
3835
3836/**
3837 * drbd_uuid_new_current() - Creates a new current UUID
3838 * @mdev: DRBD device.
3839 *
3840 * Creates a new current UUID, and rotates the old current UUID into
3841 * the bitmap slot. Causes an incremental resync upon next connect.
3842 */
3843void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3844{
3845 u64 val;
3846
3847 dev_info(DEV, "Creating new current UUID\n");
3848 D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
3849 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
ac724121 3850 debug_drbd_uuid(mdev, UI_BITMAP);
3851
3852 get_random_bytes(&val, sizeof(u64));
3853 _drbd_uuid_set(mdev, UI_CURRENT, val);
3854 /* get it to stable storage _now_ */
3855 drbd_md_sync(mdev);
3856}
3857
3858void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3859{
3860 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3861 return;
3862
3863 if (val == 0) {
3864 drbd_uuid_move_history(mdev);
3865 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3866 mdev->ldev->md.uuid[UI_BITMAP] = 0;
3867 debug_drbd_uuid(mdev, UI_HISTORY_START);
3868 debug_drbd_uuid(mdev, UI_BITMAP);
3869 } else {
3870 if (mdev->ldev->md.uuid[UI_BITMAP])
 3871			dev_warn(DEV, "bm UUID already set\n");
3872
3873 mdev->ldev->md.uuid[UI_BITMAP] = val;
3874 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
3875
ac724121 3876 debug_drbd_uuid(mdev, UI_BITMAP);
3877 }
3878 drbd_md_mark_dirty(mdev);
3879}
3880
3881/**
3882 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3883 * @mdev: DRBD device.
3884 *
3885 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3886 */
3887int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3888{
3889 int rv = -EIO;
3890
3891 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3892 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3893 drbd_md_sync(mdev);
3894 drbd_bm_set_all(mdev);
3895
3896 rv = drbd_bm_write(mdev);
3897
3898 if (!rv) {
3899 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3900 drbd_md_sync(mdev);
3901 }
3902
3903 put_ldev(mdev);
3904 }
3905
3906 return rv;
3907}
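/* The ordering above matters: MDF_FULL_SYNC is set and synced to disk before
 * the bitmap write starts, and cleared (and synced again) only after the
 * write succeeded, so an interrupted bitmap writeout still leads to a full
 * sync after the next attach. */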
3908
3909/**
3910 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3911 * @mdev: DRBD device.
3912 *
3913 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3914 */
3915int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3916{
3917 int rv = -EIO;
3918
0778286a 3919 drbd_resume_al(mdev);
3920 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3921 drbd_bm_clear_all(mdev);
3922 rv = drbd_bm_write(mdev);
3923 put_ldev(mdev);
3924 }
3925
3926 return rv;
3927}
3928
3929static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3930{
3931 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
02851e9f 3932 int rv = -EIO;
3933
3934 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3935
3936 if (get_ldev(mdev)) {
3937 drbd_bm_lock(mdev, work->why);
3938 rv = work->io_fn(mdev);
3939 drbd_bm_unlock(mdev);
3940 put_ldev(mdev);
3941 }
3942
3943 clear_bit(BITMAP_IO, &mdev->flags);
127b3178 3944 smp_mb__after_clear_bit();
3945 wake_up(&mdev->misc_wait);
3946
3947 if (work->done)
3948 work->done(mdev, rv);
3949
3950 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3951 work->why = NULL;
3952
3953 return 1;
3954}
3955
3956void drbd_ldev_destroy(struct drbd_conf *mdev)
3957{
3958 lc_destroy(mdev->resync);
3959 mdev->resync = NULL;
3960 lc_destroy(mdev->act_log);
3961 mdev->act_log = NULL;
3962 __no_warn(local,
3963 drbd_free_bc(mdev->ldev);
3964 mdev->ldev = NULL;);
3965
3966 if (mdev->md_io_tmpp) {
3967 __free_page(mdev->md_io_tmpp);
3968 mdev->md_io_tmpp = NULL;
3969 }
3970 clear_bit(GO_DISKLESS, &mdev->flags);
3971}
3972
3973static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3974{
3975 D_ASSERT(mdev->state.disk == D_FAILED);
3976 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3977 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
3978 * the protected members anymore, though, so once put_ldev reaches zero
3979 * again, it will be safe to free them. */
e9e6f3ec 3980 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3981 return 1;
3982}
3983
3984void drbd_go_diskless(struct drbd_conf *mdev)
3985{
3986 D_ASSERT(mdev->state.disk == D_FAILED);
3987 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
9d282875 3988 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
3989}
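/* The GO_DISKLESS bit above makes sure the go_diskless work is queued at most
 * once; it is cleared again in drbd_ldev_destroy(). */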
3990
3991/**
3992 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3993 * @mdev: DRBD device.
3994 * @io_fn: IO callback to be called when bitmap IO is possible
3995 * @done: callback to be called after the bitmap IO was performed
3996 * @why: Descriptive text of the reason for doing the IO
3997 *
 3998 * While IO on the bitmap is in progress, we freeze application IO, which
 3999 * ensures that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
4000 * called from worker context. It MUST NOT be used while a previous such
4001 * work is still pending!
4002 */
4003void drbd_queue_bitmap_io(struct drbd_conf *mdev,
4004 int (*io_fn)(struct drbd_conf *),
4005 void (*done)(struct drbd_conf *, int),
4006 char *why)
4007{
4008 D_ASSERT(current == mdev->worker.task);
4009
4010 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
4011 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
4012 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
4013 if (mdev->bm_io_work.why)
4014 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
4015 why, mdev->bm_io_work.why);
4016
4017 mdev->bm_io_work.io_fn = io_fn;
4018 mdev->bm_io_work.done = done;
4019 mdev->bm_io_work.why = why;
4020
22afd7ee 4021 spin_lock_irq(&mdev->req_lock);
4022 set_bit(BITMAP_IO, &mdev->flags);
4023 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
127b3178 4024 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
b411b363 4025 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
b411b363 4026 }
22afd7ee 4027 spin_unlock_irq(&mdev->req_lock);
4028}
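/* Illustrative call from worker context (a sketch only; the "why" string is
 * made up here, and a NULL "done" callback is allowed, see w_bitmap_io()):
 *
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
 *			     "set_n_write from attaching");
 */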
4029
4030/**
4031 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
4032 * @mdev: DRBD device.
4033 * @io_fn: IO callback to be called when bitmap IO is possible
4034 * @why: Descriptive text of the reason for doing the IO
4035 *
 4036 * Freezes application IO while the actual IO operation runs. This
 4037 * function MAY NOT be called from worker context.
4038 */
4039int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
4040{
4041 int rv;
4042
4043 D_ASSERT(current != mdev->worker.task);
4044
4045 drbd_suspend_io(mdev);
4046
4047 drbd_bm_lock(mdev, why);
4048 rv = io_fn(mdev);
4049 drbd_bm_unlock(mdev);
4050
4051 drbd_resume_io(mdev);
4052
4053 return rv;
4054}
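/* Synchronous counterpart sketch, for callers outside the worker thread
 * (again only an illustration of the calling convention):
 *
 *	rv = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write");
 */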
4055
4056void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4057{
4058 if ((mdev->ldev->md.flags & flag) != flag) {
4059 drbd_md_mark_dirty(mdev);
4060 mdev->ldev->md.flags |= flag;
4061 }
4062}
4063
4064void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4065{
4066 if ((mdev->ldev->md.flags & flag) != 0) {
4067 drbd_md_mark_dirty(mdev);
4068 mdev->ldev->md.flags &= ~flag;
4069 }
4070}
4071int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4072{
4073 return (bdev->md.flags & flag) != 0;
4074}
4075
4076static void md_sync_timer_fn(unsigned long data)
4077{
4078 struct drbd_conf *mdev = (struct drbd_conf *) data;
4079
4080 drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4081}
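/* The timer callback above runs in (soft)irq context, so it merely queues
 * md_sync_work at the front of the worker queue; the actual meta-data write
 * happens in w_md_sync() below, in worker (process) context. */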
4082
4083static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4084{
4085 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
4086#ifdef DEBUG
4087 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4088 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4089#endif
b411b363 4090 drbd_md_sync(mdev);
4091 return 1;
4092}
4093
4094#ifdef CONFIG_DRBD_FAULT_INJECTION
4095/* Fault insertion support including random number generator shamelessly
4096 * stolen from kernel/rcutorture.c */
4097struct fault_random_state {
4098 unsigned long state;
4099 unsigned long count;
4100};
4101
4102#define FAULT_RANDOM_MULT 39916801 /* prime */
4103#define FAULT_RANDOM_ADD 479001701 /* prime */
4104#define FAULT_RANDOM_REFRESH 10000
4105
4106/*
4107 * Crude but fast random-number generator. Uses a linear congruential
4108 * generator, with occasional help from get_random_bytes().
4109 */
4110static unsigned long
4111_drbd_fault_random(struct fault_random_state *rsp)
4112{
4113 long refresh;
4114
49829ea7 4115 if (!rsp->count--) {
4116 get_random_bytes(&refresh, sizeof(refresh));
4117 rsp->state += refresh;
4118 rsp->count = FAULT_RANDOM_REFRESH;
4119 }
4120 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4121 return swahw32(rsp->state);
4122}
4123
4124static char *
4125_drbd_fault_str(unsigned int type) {
4126 static char *_faults[] = {
4127 [DRBD_FAULT_MD_WR] = "Meta-data write",
4128 [DRBD_FAULT_MD_RD] = "Meta-data read",
4129 [DRBD_FAULT_RS_WR] = "Resync write",
4130 [DRBD_FAULT_RS_RD] = "Resync read",
4131 [DRBD_FAULT_DT_WR] = "Data write",
4132 [DRBD_FAULT_DT_RD] = "Data read",
4133 [DRBD_FAULT_DT_RA] = "Data read ahead",
4134 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
4135 [DRBD_FAULT_AL_EE] = "EE allocation",
4136 [DRBD_FAULT_RECEIVE] = "receive data corruption",
4137 };
4138
4139 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4140}
4141
4142unsigned int
4143_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4144{
4145 static struct fault_random_state rrs = {0, 0};
4146
4147 unsigned int ret = (
4148 (fault_devs == 0 ||
4149 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4150 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4151
4152 if (ret) {
4153 fault_count++;
4154
7383506c 4155 if (__ratelimit(&drbd_ratelimit_state))
4156 dev_warn(DEV, "***Simulating %s failure\n",
4157 _drbd_fault_str(type));
4158 }
4159
4160 return ret;
4161}
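/* Semantics of the fault injection knobs used above: fault_rate is the
 * percent chance of injecting a fault per request; fault_devs == 0 means
 * "all minors", otherwise bit (1 << minor) selects the affected devices. */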
4162#endif
4163
4164const char *drbd_buildtag(void)
4165{
 4166	/* When DRBD is built from external sources, this holds a reference
 4167	   to the git hash of that source code. */
4168
4169 static char buildtag[38] = "\0uilt-in";
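	/* "\0uilt-in": buildtag[0] == '\0' means "not initialized yet"; if no
	 * srcversion is available, the '\0' is overwritten with 'b' below,
	 * turning the string into "built-in". */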
4170
4171 if (buildtag[0] == 0) {
4172#ifdef CONFIG_MODULES
4173 if (THIS_MODULE != NULL)
4174 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4175 else
4176#endif
4177 buildtag[0] = 'b';
4178 }
4179
4180 return buildtag;
4181}
4182
4183module_init(drbd_init)
4184module_exit(drbd_cleanup)
4185
4186EXPORT_SYMBOL(drbd_conn_str);
4187EXPORT_SYMBOL(drbd_role_str);
4188EXPORT_SYMBOL(drbd_disk_str);
4189EXPORT_SYMBOL(drbd_set_st_err_str);