/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

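/* Note on GFP_TRY: these flags include neither __GFP_WAIT nor __GFP_IO,
 * so alloc_page() fails fast and quietly instead of sleeping or recursing
 * into reclaim write-out; the page-pool retry logic below then handles
 * the shortage. */
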
/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page = *head;
	struct page *tmp;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (!tmp)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
	struct page *tmp;

	/* paranoia: catch callers handing in an inconsistent chain */
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

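/* Illustration (not part of the driver): with page->private serving as the
 * "next" pointer, walking a chain is simply
 *
 *	for (p = chain; p; p = page_chain_next(p))
 *		;
 *
 * and page_chain_add() is O(1) because callers that already know their tail
 * (from page_chain_tail()) splice the whole chain in front of *head. */
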
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished we can
	   stop to examine the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

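/* The sleep/wake pairing: drbd_pp_free() below issues the matching
 * wake_up(&drbd_pp_wait), so every path that returns pages to the pool
 * may unblock an allocator waiting in drbd_pp_alloc(). */
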
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_process_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->colision);
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->sector = sector;
	e->block_id = id;
	e->flags = 0;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->colision));
	mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}

/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

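/* Note: the third argument of the callbacks above is their "cancel" flag.
 * Passing !ok tells them to skip sending ACKs once the connection has
 * already dropped below C_WF_REPORT_PARAMS. */
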
void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on  sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

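/* Setting sk_sndbuf/sk_rcvbuf directly and raising SOCK_SNDBUF_LOCK /
 * SOCK_RCVBUF_LOCK is the in-kernel equivalent of setsockopt(SO_SNDBUF /
 * SO_RCVBUF): the lock bits keep TCP buffer auto-tuning from overriding
 * the configured sizes later on. */
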
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
		/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
		/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

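/* The +/- timeo/7 jitter above desynchronizes the two nodes: if both time
 * out of their accept() at the same moment and immediately try to connect,
 * the initial packets keep "crossing" (see drbd_connect()); randomizing
 * the listen timeout breaks that lockstep. */
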
static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

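/* The MSG_DONTWAIT | MSG_PEEK probe above never consumes data: rr > 0 means
 * payload is already queued, -EAGAIN means the connection is alive but idle;
 * every other result indicates the peer went away. */
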
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

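	/* Two sockets per connection: "sock" carries the bulk data stream,
	 * "msock" the small, latency-sensitive acks and pings; the TC_PRIO_*
	 * split lets meta packets overtake queued bulk data. */
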
	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);

	drbd_thread_start(&mdev->asender);

	if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
		drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
		put_ldev(mdev);
	}

	if (!drbd_send_protocol(mdev))
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	mdev->last_received = jiffies;

	return true;
}

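/* Wire format note, as visible above: the legacy h80 header carries a 16-bit
 * length, while the h95 header (BE_DRBD_MAGIC_BIG) carries a 32-bit length
 * for payloads that no longer fit in 16 bits. */
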
static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee() - submit the page chain of an epoch entry as one or more bios
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:	flag field, see bio->bi_rw
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* a single page must always be possible! */
			BUG_ON(bio->bi_vcnt == 0);
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return -ENOMEM;
}

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return true;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return true;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return false;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return true;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
			     rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size &  0x1ff) return NULL;
	ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return true;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
			     rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt. if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			dev_warn(DEV, "short read receiving data reply: "
			     "read %d expected %d\n",
			     rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->colision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return false;
}

static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return false;
	}

	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}

static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return ok;
}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok = 1, pcmd;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->colision));
		hlist_del_init(&e->colision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->colision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}

static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	int ok = 1;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->colision));
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);

	dec_unacked(mdev);

	return ok;
}

/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
{
	DEFINE_WAIT(wait);
	unsigned int p_seq;
	long timeout;
	int ret = 0;

	spin_lock(&mdev->peer_seq_lock);
	for (;;) {
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		if (seq_le(packet_seq, mdev->peer_seq+1))
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		p_seq = mdev->peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		timeout = schedule_timeout(30*HZ);
		spin_lock(&mdev->peer_seq_lock);
		if (timeout == 0 && p_seq == mdev->peer_seq) {
			ret = -ETIMEDOUT;
			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
			break;
		}
	}
	finish_wait(&mdev->seq_wait, &wait);
	if (mdev->peer_seq+1 == packet_seq)
		mdev->peer_seq++;
	spin_unlock(&mdev->peer_seq_lock);
	return ret;
}

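/* A wraparound-safe compare such as seq_le() is conventionally written via
 * signed difference; a minimal sketch (the real helper may differ):
 *
 *	static inline int seq_le(u32 a, u32 b)
 *	{
 *		return (s32)(a - b) <= 0;
 *	}
 */
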
/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}

/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	struct drbd_epoch_entry *e;
	struct p_data *p = &mdev->data.rbuf.data;
	int rw = WRITE;
	u32 dp_flags;

	if (!get_ldev(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write mirrored data block "
			    "to local disk.\n");
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
			mdev->peer_seq++;
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);
	}

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_write_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);
	if (!e) {
		put_ldev(mdev);
		return false;
	}

	e->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
	} else {
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		DEFINE_WAIT(wait);
		struct drbd_request *i;
		struct hlist_node *n;
		struct hlist_head *slot;
		int first;

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee can not
		 *    have a conflict with an other ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *    submit.
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *	 queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *	 block the receiver, waiting on misc_wait
		 *	 until no more conflicting requests are there,
		 *	 or we get interrupted (disconnect).
		 *
		 *	 we do not just write after local io completion of those
		 *	 requests, but only after req is done completely, i.e.
		 *	 we wait for the P_DISCARD_ACK to arrive!
		 *
		 *	 then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
		slot = tl_hash_slot(mdev, sector);
		first = 1;
		for (;;) {
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);
			hlist_for_each_entry(i, n, slot, colision) {
				if (OVERLAPS) {
					/* only ALERT on first iteration,
					 * we may be woken up early... */
					if (first)
						dev_alert(DEV, "%s[%u] Concurrent local write detected!"
						      " new: %llus +%u; pending: %llus +%u\n",
						      current->comm, current->pid,
						      (unsigned long long)sector, size,
						      (unsigned long long)i->sector, i->size);
					if (i->rq_state & RQ_NET_PENDING)
						++have_unacked;
					++have_conflict;
				}
			}
#undef OVERLAPS
			if (!have_conflict)
				break;

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);
				inc_unacked(mdev);
				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				put_ldev(mdev);
				wake_asender(mdev);
				finish_wait(&mdev->misc_wait, &wait);
				return true;
			}

			if (signal_pending(current)) {
				hlist_del_init(&e->colision);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;
			}

			spin_unlock_irq(&mdev->req_lock);
			if (first) {
				first = 0;
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);
			}
			schedule();
			spin_lock_irq(&mdev->req_lock);
		}
		finish_wait(&mdev->misc_wait, &wait);
	}

	list_add(&e->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	switch (mdev->net_conf->wire_protocol) {
	case DRBD_PROT_C:
		inc_unacked(mdev);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
		break;
	case DRBD_PROT_B:
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, e);
		break;
	case DRBD_PROT_A:
		/* nothing to do */
		break;
	}

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->sector, e->size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		e->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, e->sector);
	}

	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
		return true;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);
	if (e->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, e->sector);

out_interrupted:
	/* yes, the epoch_size now is imbalanced.
	 * but we drop the connection anyways, so we don't have a chance to
	 * receive a barrier... atomic_inc(&mdev->epoch_size); */
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return false;
}

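/* Recap of the ack dance above: protocol A sends no ack at all, protocol B
 * acks on reception (P_RECV_ACK), and protocol C acks only after the local
 * write completed (P_WRITE_ACK, sent from e_end_block()). */
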
/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 * (more than 64 sectors) of activity we cannot account for with our own resync
 * activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
{
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	int curr_events;
	int throttle = 0;

	/* feature disabled? */
	if (mdev->sync_conf.c_min_rate == 0)
		return 0;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
	if (tmp) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
			return 0;
		}
		/* Do not slow down if app IO is already waiting for this extent */
	}
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
			atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;
		int i;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approx. */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		if (!dt)
			dt++;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > mdev->sync_conf.c_min_rate)
			throttle = 1;
	}
	return throttle;
}

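/* Worked example for the estimate above: one bitmap bit covers 4 KiB, so
 * Bit2KB(x) == 4*x.  If the chosen mark shows db = 15000 bits cleared over
 * dt = 6 seconds, then dbdt = Bit2KB(2500) = 10000 KB/s; with a c_min_rate
 * of, say, 4000 KB/s the resync is "fast enough" and may be throttled. */
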
static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
{
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	int size, verb;
	unsigned int fault_type;
	struct p_block_req *p = &mdev->data.rbuf.block_req;

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return false;
	}
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return false;
	}

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		verb = 1;
		switch (cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
			break;
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
		case P_OV_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			break;
		case P_OV_REPLY:
			verb = 0;
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			break;
		default:
			dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
				cmdname(cmd));
		}
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possibly payload */
		return drbd_drain_block(mdev, digest_size);
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
	if (!e) {
		put_ldev(mdev);
		return false;
	}

	switch (cmd) {
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */
		goto submit;

	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		e->digest = di;
		e->flags |= EE_HAS_DIGEST;

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
			goto out_free_e;

		if (cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;
		}
		break;

	case P_OV_REQUEST:
		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			int i;
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			}
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	default:
		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		    cmdname(cmd));
		fault_type = DRBD_FAULT_MAX;
		goto out_free_e;
	}

	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time. For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while, anyways. */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through. The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only. */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))
		goto out_free_e;

submit_for_resync:
	atomic_add(size >> 9, &mdev->rs_sect_ev);

submit:
	inc_unacked(mdev);
	spin_lock_irq(&mdev->req_lock);
	list_add_tail(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
		return true;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

out_free_e:
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return false;
}

static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->net_conf->after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = 1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
		     "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv = 1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if (ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv = 1;
		else /* ( ch_self == ch_peer ) */
			/* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv = 1;
	}

	return rv;
}

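/* Convention for all three recover functions: a positive return means
 * "I become sync source / keep my data", a negative return means "I become
 * sync target / discard my data", and -100 means no automatic decision.
 * The _0p/_1p/_2p suffix is the number of nodes that were primary when the
 * split brain was detected. */
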
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1 && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
	     text,
	     (unsigned long long)uuid[UI_CURRENT],
	     (unsigned long long)uuid[UI_BITMAP],
	     (unsigned long long)uuid[UI_HISTORY_START],
	     (unsigned long long)uuid[UI_HISTORY_END],
	     (unsigned long long)bits,
	     (unsigned long long)flags);
}

/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
{
	u64 self, peer;
	int i, j;

	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	*rule_nr = 10;
	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
		return 0;

	*rule_nr = 20;
	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)
		return -2;

	*rule_nr = 30;
	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))
		return 2;

	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_set_bm(mdev, 0UL);

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
				*rule_nr = 34;
			} else {
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
				*rule_nr = 36;
			}

			return 1;
		}

		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
				*rule_nr = 35;
			} else {
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
				*rule_nr = 37;
			}

			return -1;
		}

		/* Common power [off|failure] */
		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
			(mdev->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */
		*rule_nr = 40;

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
			return dc ? -1 : 1;
		}
	}

	*rule_nr = 50;
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer)
		return -1;

	*rule_nr = 51;
	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
		peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
		if (self == peer) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of the peer's UUIDs. */

			if (mdev->agreed_pro_version < 91)
				return -1091;

			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
			return -1;
		}
	}

	*rule_nr = 60;
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		peer = mdev->p_uuid[i] & ~((u64)1);
		if (self == peer)
			return -2;
	}

	*rule_nr = 70;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	if (self == peer)
		return 1;

	*rule_nr = 71;
	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
		peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
		if (self == peer) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of our UUIDs. */

			if (mdev->agreed_pro_version < 91)
				return -1091;

			_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
			_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

			dev_info(DEV, "Undid last start of resync:\n");

			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);

			return 1;
		}
	}

	*rule_nr = 80;
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = mdev->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}

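/* A worked example for rule 40 above: rct packs "who was Primary at crash
 * time" into two bits.  A set CRASHED_PRIMARY flag contributes 1; bit 1 of
 * the peer's UI_FLAGS contributes 2.  rct == 3 therefore means both sides
 * crashed while Primary, and DISCARD_CONCURRENT breaks the tie. */
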
/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	int hg, rule_nr;
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg == -1091) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol 91\n");
		return C_MASK;
	}

	if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
			 hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

	if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
				 "automatically solved. Sync from %s node\n",
				 pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
					 " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
				 "Sync from %s node\n",
				 (hg < 0) ? "peer" : "this");
	}

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (mdev->net_conf->rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		default:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
				 " assumption\n");
		}
	}

	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
				 drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}

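/* Note the |hg| encoding used throughout drbd_sync_handshake(): magnitude 1
 * means "resync using the existing bitmap", magnitude 2 (hg doubled) means
 * "set the whole bitmap, full sync".  The sign picks the direction: positive
 * makes this node the sync source, negative the sync target. */
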
/* returns 1 if invalid */
static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
	    (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
		return 0;

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
	if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
	    self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
		return 1;

	/* everything else is valid if they are equal on both sides. */
	if (peer == self)
		return 0;

	/* everything else is invalid. */
	return 1;
}

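/* Example: peer=discard-remote with self=discard-local is the one allowed
 * asymmetric pair, because both sides then agree on whose data survives;
 * peer=discard-remote with self=discard-remote would have each node
 * discarding the other's data, which cmp_after_sb() rejects. */
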
static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_protocol *p = &mdev->data.rbuf.protocol;
	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_want_lose, p_two_primaries, cf;
	char p_integrity_alg[SHARED_SECRET_MAX] = "";

	p_proto		= be32_to_cpu(p->protocol);
	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
	cf		= be32_to_cpu(p->conn_flags);
	p_want_lose	= cf & CF_WANT_LOSE;

	clear_bit(CONN_DRY_RUN, &mdev->flags);

	if (cf & CF_DRY_RUN)
		set_bit(CONN_DRY_RUN, &mdev->flags);

	if (p_proto != mdev->net_conf->wire_protocol) {
		dev_err(DEV, "incompatible communication protocols\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
		dev_err(DEV, "incompatible after-sb-0pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
		dev_err(DEV, "incompatible after-sb-1pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
		dev_err(DEV, "incompatible after-sb-2pri settings\n");
		goto disconnect;
	}

	if (p_want_lose && mdev->net_conf->want_lose) {
		dev_err(DEV, "both sides have the 'want_lose' flag set\n");
		goto disconnect;
	}

	if (p_two_primaries != mdev->net_conf->two_primaries) {
		dev_err(DEV, "incompatible setting of the two-primaries options\n");
		goto disconnect;
	}

	if (mdev->agreed_pro_version >= 87) {
		unsigned char *my_alg = mdev->net_conf->integrity_alg;

		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
			return 0;

		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
		if (strcmp(p_integrity_alg, my_alg)) {
			dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
			goto disconnect;
		}
		dev_info(DEV, "data-integrity-alg: %s\n",
			 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
	}

	return 1;

disconnect:
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return 0;
}

/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
		crypto_free_hash(tfm);
		dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}

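/* Callers must check the result in two steps: NULL means the feature is
 * simply not configured (empty algorithm name), IS_ERR() means allocation
 * failed; only a plain pointer is usable.  receive_SyncParam() below
 * follows this pattern for both verify-alg and csums-alg. */
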
static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
{
	struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	const int apv = mdev->agreed_pro_version;
	int *rs_plan_s = NULL;
	int fifo_size = 0;

	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
					+ SHARED_SECRET_MAX
		    : apv <= 94 ? sizeof(struct p_rs_param_89)
		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);

	if (packet_size > exp_max_sz) {
		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
			packet_size, exp_max_sz);
		return 0;
	}

	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
		data_size   = packet_size - header_size;
	} else if (apv <= 94) {
		header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
		data_size   = packet_size - header_size;
		D_ASSERT(data_size == 0);
	} else {
		header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
		data_size   = packet_size - header_size;
		D_ASSERT(data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
		return 0;

	mdev->sync_conf.rate = be32_to_cpu(p->rate);

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX) {
				dev_err(DEV, "verify-alg too long, "
					"peer wants %u, accepting only %u byte\n",
					data_size, SHARED_SECRET_MAX);
				return 0;
			}

			if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
				return 0;

			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
					mdev->sync_conf.verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
					mdev->sync_conf.csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv > 94) {
			mdev->sync_conf.rate = be32_to_cpu(p->rate);
			mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
			mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
			mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
			mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);

			fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
			if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
				rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
				if (!rs_plan_s) {
					dev_err(DEV, "kmalloc of fifo_buffer failed");
					goto disconnect;
				}
			}
		}

		spin_lock(&mdev->peer_seq_lock);
		/* lock against drbd_nl_syncer_conf() */
		if (verify_tfm) {
			strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
			mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
			crypto_free_hash(mdev->verify_tfm);
			mdev->verify_tfm = verify_tfm;
			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
		}
		if (csums_tfm) {
			strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
			mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
			crypto_free_hash(mdev->csums_tfm);
			mdev->csums_tfm = csums_tfm;
			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
		}
		if (fifo_size != mdev->rs_plan_s.size) {
			kfree(mdev->rs_plan_s.values);
			mdev->rs_plan_s.values = rs_plan_s;
			mdev->rs_plan_s.size = fifo_size;
			mdev->rs_planed = 0;
		}
		spin_unlock(&mdev->peer_seq_lock);
	}

	return 1;

disconnect:
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return 0;
}

static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ */
}

/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_conf *mdev,
	const char *s, sector_t a, sector_t b)
{
	sector_t d;
	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
			 (unsigned long long)a, (unsigned long long)b);
}

static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_sizes *p = &mdev->data.rbuf.sizes;
	enum determine_dev_size dd = unchanged;
	unsigned int max_bio_size;
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
	enum dds_flags ddsf;

	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
		dev_err(DEV, "some backing storage is needed\n");
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return 0;
	}

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
	mdev->p_size = p_size;

	if (get_ldev(mdev)) {
		warn_if_differ_considerably(mdev, "lower level device sizes",
			   p_size, drbd_get_max_capacity(mdev->ldev));
		warn_if_differ_considerably(mdev, "user requested size",
					    p_usize, mdev->ldev->dc.disk_size);

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
		if (mdev->state.conn == C_WF_REPORT_PARAMS)
			p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
					       p_usize);

		my_usize = mdev->ldev->dc.disk_size;

		if (mdev->ldev->dc.disk_size != p_usize) {
			mdev->ldev->dc.disk_size = p_usize;
			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
				 (unsigned long)mdev->ldev->dc.disk_size);
		}

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
		    drbd_get_capacity(mdev->this_bdev) &&
		    mdev->state.disk >= D_OUTDATED &&
		    mdev->state.conn < C_CONNECTED) {
			dev_err(DEV, "The peer's disk size is too small!\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			mdev->ldev->dc.disk_size = my_usize;
			put_ldev(mdev);
			return 0;
		}
		put_ldev(mdev);
	}

	ddsf = be16_to_cpu(p->dds_flags);
	if (get_ldev(mdev)) {
		dd = drbd_determin_dev_size(mdev, ddsf);
		put_ldev(mdev);
		if (dd == dev_size_error)
			return 0;
		drbd_md_sync(mdev);
	} else {
		/* I am diskless, need to accept the peer's size. */
		drbd_set_my_capacity(mdev, p_size);
	}

	if (get_ldev(mdev)) {
		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
			ldsc = 1;
		}

		if (mdev->agreed_pro_version < 94)
			max_bio_size = be32_to_cpu(p->max_bio_size);
		else if (mdev->agreed_pro_version == 94)
			max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			max_bio_size = DRBD_MAX_BIO_SIZE;

		if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9)
			drbd_setup_queue_param(mdev, max_bio_size);

		drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
		put_ldev(mdev);
	}

	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
		if (be64_to_cpu(p->c_size) !=
		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
			/* we have different sizes, probably peer
			 * needs to know my new size... */
			drbd_send_sizes(mdev, 0, ddsf);
		}
		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
			if (mdev->state.pdsk >= D_INCONSISTENT &&
			    mdev->state.disk >= D_INCONSISTENT) {
				if (ddsf & DDSF_NO_RESYNC)
					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
				else
					resync_after_online_grow(mdev);
			} else
				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		}
	}

	return 1;
}

static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_uuids *p = &mdev->data.rbuf.uuids;
	u64 *p_uuid;
	int i;

	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);

	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
		p_uuid[i] = be64_to_cpu(p->uuid[i]);

	kfree(mdev->p_uuid);
	mdev->p_uuid = p_uuid;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.disk < D_INCONSISTENT &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
			(unsigned long long)mdev->ed_uuid);
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return 0;
	}

	if (get_ldev(mdev)) {
		int skip_initial_sync =
			mdev->state.conn == C_CONNECTED &&
			mdev->agreed_pro_version >= 90 &&
			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
			(p_uuid[UI_FLAGS] & 8);
		if (skip_initial_sync) {
			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
					"clear_n_write from receive_uuids");
			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			drbd_md_sync(mdev);
		}
		put_ldev(mdev);
	} else if (mdev->state.disk < D_INCONSISTENT &&
		   mdev->state.role == R_PRIMARY) {
		/* I am a diskless primary, the peer just created a new current UUID
		   for me. */
		drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
	}

	/* Before we test for the disk state, we should wait until an eventually
	   ongoing cluster wide state change is finished. That is important if
	   we are primary and are detaching from our disk. We need to see the
	   new disk state... */
	wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
		drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);

	return 1;
}

/**
 * convert_state() - Converts the peer's view of the cluster state to our point of view
 * @ps:		The state as seen by the peer.
 */
static union drbd_state convert_state(union drbd_state ps)
{
	union drbd_state ms;

	static enum drbd_conns c_tab[] = {
		[C_CONNECTED] = C_CONNECTED,

		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
		[C_VERIFY_S]      = C_VERIFY_T,
		[C_MASK]          = C_MASK,
	};

	ms.i = ps.i;

	ms.conn = c_tab[ps.conn];
	ms.peer = ps.role;
	ms.role = ps.peer;
	ms.pdsk = ps.disk;
	ms.disk = ps.pdsk;
	ms.peer_isp = (ps.aftr_isp | ps.user_isp);

	return ms;
}

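/* Example: a peer reporting "conn=C_STARTING_SYNC_S, role=Primary,
 * peer=Secondary, disk=UpToDate, pdsk=Inconsistent" converts to our view
 * "conn=C_STARTING_SYNC_T, role=Secondary, peer=Primary,
 * disk=Inconsistent, pdsk=UpToDate". */
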
static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_req_state *p = &mdev->data.rbuf.req_state;
	union drbd_state mask, val;
	enum drbd_state_rv rv;

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

	if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
	    test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
		return 1;
	}

	mask = convert_state(mask);
	val = convert_state(val);

	rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);

	drbd_send_sr_reply(mdev, rv);
	drbd_md_sync(mdev);

	return 1;
}

static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_state *p = &mdev->data.rbuf.state;
	union drbd_state os, ns, peer_state;
	enum drbd_disk_state real_peer_disk;
	enum chg_state_flags cs_flags;
	int rv;

	peer_state.i = be32_to_cpu(p->state);

	real_peer_disk = peer_state.disk;
	if (peer_state.disk == D_NEGOTIATING) {
		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
	}

	spin_lock_irq(&mdev->req_lock);
 retry:
	os = ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	/* peer says his disk is uptodate, while we think it is inconsistent,
	 * and this happens while we think we have a sync going on. */
	if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
		/* If we are (becoming) SyncSource, but peer is still in sync
		 * preparation, ignore its uptodate-ness to avoid flapping, it
		 * will change to inconsistent once the peer reaches active
		 * syncing states.
		 * It may have changed syncer-paused flags, however, so we
		 * cannot ignore this completely. */
		if (peer_state.conn > C_CONNECTED &&
		    peer_state.conn < C_SYNC_SOURCE)
			real_peer_disk = D_INCONSISTENT;

		/* if peer_state changes to connected at the same time,
		 * it explicitly notifies us that it finished resync.
		 * Maybe we should finish it up, too? */
		else if (os.conn >= C_SYNC_SOURCE &&
			 peer_state.conn == C_CONNECTED) {
			if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
				drbd_resync_finished(mdev);
			return 1;
		}
	}

	/* peer says his disk is inconsistent, while we think it is uptodate,
	 * and this happens while the peer still thinks we have a sync going on,
	 * but we think we are already done with the sync.
	 * We ignore this to avoid flapping pdsk.
	 * This should not happen, if the peer is a recent version of drbd. */
	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
		real_peer_disk = D_UP_TO_DATE;

	if (ns.conn == C_WF_REPORT_PARAMS)
		ns.conn = C_CONNECTED;

	if (peer_state.conn == C_AHEAD)
		ns.conn = C_BEHIND;

	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		int cr; /* consider resync */

		/* if we established a new connection */
		cr  = (os.conn < C_CONNECTED);
		/* if we had an established connection
		 * and one of the nodes newly attaches a disk */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.disk == D_NEGOTIATING ||
			os.disk == D_NEGOTIATING));
		/* if we have both been inconsistent, and the peer has been
		 * forced to be UpToDate with --overwrite-data */
		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
		/* if we had been plain connected, and the admin requested to
		 * start a sync by "invalidate" or "invalidate-remote" */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.conn >= C_STARTING_SYNC_S &&
			peer_state.conn <= C_WF_BITMAP_T));

		if (cr)
			ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);

		put_ldev(mdev);
		if (ns.conn == C_MASK) {
			ns.conn = C_CONNECTED;
			if (mdev->state.disk == D_NEGOTIATING) {
				drbd_force_state(mdev, NS(disk, D_FAILED));
			} else if (peer_state.disk == D_NEGOTIATING) {
				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
				peer_state.disk = D_DISKLESS;
				real_peer_disk = D_DISKLESS;
			} else {
				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
					return 0;
				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return 0;
			}
		}
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.i != os.i)
		goto retry;
	clear_bit(CONSIDER_RESYNC, &mdev->flags);
	ns.peer = peer_state.role;
	ns.pdsk = real_peer_disk;
	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
		ns.disk = mdev->new_state_tmp.disk;
	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
	if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
		/* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
		   for temporary network outages! */
		spin_unlock_irq(&mdev->req_lock);
		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
		tl_clear(mdev);
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
		return 0;
	}
	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS) {
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return 0;
	}

	if (os.conn > C_WF_REPORT_PARAMS) {
		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
		    peer_state.disk != D_NEGOTIATING) {
			/* we want resync, peer has not yet decided to sync... */
			/* Nowadays only used when forcing a node into primary role and
			   setting its disk to UpToDate with that */
			drbd_send_uuids(mdev);
			drbd_send_state(mdev);
		}
	}

	mdev->net_conf->want_lose = 0;

	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */

	return 1;
}

static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;

	wait_event(mdev->misc_wait,
		   mdev->state.conn == C_WF_SYNC_UUID ||
		   mdev->state.conn == C_BEHIND ||
		   mdev->state.conn < C_CONNECTED ||
		   mdev->state.disk < D_NEGOTIATING);

	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */

	/* Here the _drbd_uuid_ functions are right, current should
	   _not_ be rotated into the history */
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);

		drbd_start_resync(mdev, C_SYNC_TARGET);

		put_ldev(mdev);
	} else
		dev_err(DEV, "Ignoring SyncUUID packet!\n");

	return 1;
}

enum receive_bitmap_ret { OK, DONE, FAILED };

static enum receive_bitmap_ret
receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
		     unsigned long *buffer, struct bm_xfer_ctx *c)
{
	unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
	unsigned want = num_words * sizeof(long);

	if (want != data_size) {
		dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
		return FAILED;
	}
	if (want == 0)
		return DONE;
	if (drbd_recv(mdev, buffer, want) != want)
		return FAILED;

	drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);

	c->word_offset += num_words;
	c->bit_offset = c->word_offset * BITS_PER_LONG;
	if (c->bit_offset > c->bm_bits)
		c->bit_offset = c->bm_bits;

	return OK;
}

static enum receive_bitmap_ret
recv_bm_rle_bits(struct drbd_conf *mdev,
		 struct p_compressed_bm *p,
		 struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	u64 look_ahead;
	u64 rl;
	u64 tmp;
	unsigned long s = c->bit_offset;
	unsigned long e;
	int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
	int toggle = DCBP_get_start(p);
	int have;
	int bits;

	bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));

	bits = bitstream_get_bits(&bs, &look_ahead, 64);
	if (bits < 0)
		return FAILED;

	for (have = bits; have > 0; s += rl, toggle = !toggle) {
		bits = vli_decode_bits(&rl, look_ahead);
		if (bits <= 0)
			return FAILED;

		if (toggle) {
			e = s + rl - 1;
			if (e >= c->bm_bits) {
				dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
				return FAILED;
			}
			_drbd_bm_set_bits(mdev, s, e);
		}

		if (have < bits) {
			dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
				have, bits, look_ahead,
				(unsigned int)(bs.cur.b - p->code),
				(unsigned int)bs.buf_len);
			return FAILED;
		}
		look_ahead >>= bits;
		have -= bits;

		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
		if (bits < 0)
			return FAILED;
		look_ahead |= tmp << have;
		have += bits;
	}

	c->bit_offset = s;
	bm_xfer_ctx_bit_to_word_offset(c);

	return (s == c->bm_bits) ? DONE : OK;
}

static enum receive_bitmap_ret
decode_bitmap_c(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c)
{
	if (DCBP_get_code(p) == RLE_VLI_Bits)
		return recv_bm_rle_bits(mdev, p, c);

	/* other variants had been implemented for evaluation,
	 * but have been dropped as this one turned out to be "best"
	 * during all our tests. */

	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	return FAILED;
}

void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c)
{
	/* what would it take to transfer it "plaintext" */
	unsigned plain = sizeof(struct p_header80) *
		((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
		+ c->bm_words * sizeof(long);
	unsigned total = c->bytes[0] + c->bytes[1];
	unsigned r;

	/* total can not be zero. but just in case: */
	if (total == 0)
		return;

	/* don't report if not compressed */
	if (total >= plain)
		return;

	/* total < plain. check for overflow, still */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
				    : (1000 * total / plain);

	if (r > 1000)
		r = 1000;

	r = 1000 - r;
	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
	     "total %u; compression: %u.%u%%\n",
			direction,
			c->bytes[1], c->packets[1],
			c->bytes[0], c->packets[0],
			total, r/10, r % 10);
}

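/* Rough worked example (assuming 8-byte longs and roughly 500 words per
 * packet): with bm_words = 4096, "plain" is about 9 headers plus 32 KiB of
 * payload, i.e. ~32840 bytes.  If the RLE transfer moved only 1024 bytes in
 * total, r becomes 1000 - (1000*1024)/32840 = 969, logged as "96.9%". */
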
/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter whether we process it in 32 bit or 64 bit chunks,
   as long as it is little endian. (Understand it as a byte stream,
   beginning with the lowest byte...) If we were to use big endian
   we would need to process it from the highest address to the lowest,
   in order to be agnostic to the 32 vs 64 bit issue.

   returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct bm_xfer_ctx c;
	unsigned long *buffer;
	enum receive_bitmap_ret ret;
	int ok = 0;
	struct p_header80 *h = &mdev->data.rbuf.header.h80;

	/* drbd_bm_lock(mdev, "receive bitmap"); By intention no bm_lock */

	/* maybe we should use some per thread scratch page,
	 * and allocate that during initial device creation? */
	buffer = (unsigned long *) __get_free_page(GFP_NOIO);
	if (!buffer) {
		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
		goto out;
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		if (cmd == P_BITMAP) {
			ret = receive_bitmap_plain(mdev, data_size, buffer, &c);
		} else if (cmd == P_COMPRESSED_BITMAP) {
			/* MAYBE: sanity check that we speak proto >= 90,
			 * and the feature is enabled! */
			struct p_compressed_bm *p;

			if (data_size > BM_PACKET_PAYLOAD_BYTES) {
				dev_err(DEV, "ReportCBitmap packet too large\n");
				goto out;
			}
			/* use the page buff */
			p = (void *)buffer;
			memcpy(p, h, sizeof(*h));
			if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
				goto out;
			if (data_size <= (sizeof(*p) - sizeof(p->head))) {
				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
				goto out;
			}
			ret = decode_bitmap_c(mdev, p, &c);
		} else {
			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
			goto out;
		}

		c.packets[cmd == P_BITMAP]++;
		c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;

		if (ret != OK)
			break;

		if (!drbd_recv_header(mdev, &cmd, &data_size))
			goto out;
	} while (ret == OK);
	if (ret == FAILED)
		goto out;

	INFO_bm_xfer_stats(mdev, "receive", &c);

	if (mdev->state.conn == C_WF_BITMAP_T) {
		enum drbd_state_rv rv;

		ok = !drbd_send_bitmap(mdev);
		if (!ok)
			goto out;
		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
		rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		D_ASSERT(rv == SS_SUCCESS);
	} else if (mdev->state.conn != C_WF_BITMAP_S) {
		/* admin may have requested C_DISCONNECTING,
		 * other threads may have noticed network errors */
		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
			 drbd_conn_str(mdev->state.conn));
	}

	ok = 1;
 out:
	/* drbd_bm_unlock(mdev); by intention no lock */
	if (ok && mdev->state.conn == C_WF_BITMAP_S)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	free_page((unsigned long) buffer);
	return ok;
}

static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	/* TODO zero copy sink :) */
	static char sink[128];
	int size, want, r;

	dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
		 cmd, data_size);

	size = data_size;
	while (size > 0) {
		want = min_t(int, size, sizeof(sink));
		r = drbd_recv(mdev, sink, want);
		ERR_IF(r <= 0) break;
		size -= r;
	}
	return size == 0;
}

static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	/* Make sure we've acked all the TCP data associated
	 * with the data requests being unplugged */
	drbd_tcp_quickack(mdev->data.socket);

	return 1;
}

static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_block_desc *p = &mdev->data.rbuf.block_desc;

	drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));

	return 1;
}

typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);

struct data_cmd {
	int expect_payload;
	size_t pkt_size;
	drbd_cmd_handler_f function;
};

static struct data_cmd drbd_cmd_handler[] = {
	[P_DATA]	    = { 1, sizeof(struct p_data), receive_Data },
	[P_DATA_REPLY]	    = { 1, sizeof(struct p_data), receive_DataReply },
	[P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply },
	[P_BARRIER]	    = { 0, sizeof(struct p_barrier), receive_Barrier },
	[P_BITMAP]	    = { 1, sizeof(struct p_header80), receive_bitmap },
	[P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap },
	[P_UNPLUG_REMOTE]   = { 0, sizeof(struct p_header80), receive_UnplugRemote },
	[P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_SYNC_PARAM]	    = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_SYNC_PARAM89]    = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_PROTOCOL]	    = { 1, sizeof(struct p_protocol), receive_protocol },
	[P_UUIDS]	    = { 0, sizeof(struct p_uuids), receive_uuids },
	[P_SIZES]	    = { 0, sizeof(struct p_sizes), receive_sizes },
	[P_STATE]	    = { 0, sizeof(struct p_state), receive_state },
	[P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
	[P_SYNC_UUID]	    = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
	[P_OV_REQUEST]	    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_OV_REPLY]	    = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_DELAY_PROBE]	    = { 0, sizeof(struct p_delay_probe93), receive_skip },
	[P_OUT_OF_SYNC]	    = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
	/* anything missing from this table is in
	 * the asender_tbl, see get_asender_cmd */
	[P_MAX_CMD]	    = { 0, 0, NULL },
};

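/* The receiver main loop below dispatches on the packet type: pkt_size is
 * the expected (sub-)header size for each command, and expect_payload marks
 * commands that may carry additional data beyond that header; drbdd()
 * treats trailing payload on any other command as a protocol error. */
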
/* All handler functions that expect a sub-header get that sub-header in
   mdev->data.rbuf.header.head.payload.

   Usually in mdev->data.rbuf.header.head the callback can find the usual
   p_header, but they may not rely on that. Since there is also p_header95!
 */
static void drbdd(struct drbd_conf *mdev)
{
	union p_header *header = &mdev->data.rbuf.header;
	unsigned int packet_size;
	enum drbd_packets cmd;
	size_t shs; /* sub header size */
	int rv;

	while (get_t_state(&mdev->receiver) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (!drbd_recv_header(mdev, &cmd, &packet_size))
			goto err_out;

		if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
			dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
			goto err_out;
		}

		shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
		if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
			dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
			goto err_out;
		}

		if (shs) {
			rv = drbd_recv(mdev, &header->h80.payload, shs);
			if (unlikely(rv != shs)) {
				dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
				goto err_out;
			}
		}

		rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);

		if (unlikely(!rv)) {
			dev_err(DEV, "error receiving %s, l: %d!\n",
				cmdname(cmd), packet_size);
			goto err_out;
		}
	}

	if (0) {
	err_out:
		drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	}
	/* If we leave here, we probably want to update at least the
	 * "Connected" indicator on stable storage. Do so explicitly here. */
	drbd_md_sync(mdev);
}

void drbd_flush_workqueue(struct drbd_conf *mdev)
{
	struct drbd_wq_barrier barr;

	barr.w.cb = w_prev_work_done;
	init_completion(&barr.done);
	drbd_queue_work(&mdev->data.work, &barr.w);
	wait_for_completion(&barr.done);
}

void drbd_free_tl_hash(struct drbd_conf *mdev)
{
	struct hlist_head *h;

	spin_lock_irq(&mdev->req_lock);

	if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
		spin_unlock_irq(&mdev->req_lock);
		return;
	}
	/* paranoia code */
	for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
				(int)(h - mdev->ee_hash), h->first);
	kfree(mdev->ee_hash);
	mdev->ee_hash = NULL;
	mdev->ee_hash_s = 0;

	/* paranoia code */
	for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
				(int)(h - mdev->tl_hash), h->first);
	kfree(mdev->tl_hash);
	mdev->tl_hash = NULL;
	mdev->tl_hash_s = 0;
	spin_unlock_irq(&mdev->req_lock);
}

static void drbd_disconnect(struct drbd_conf *mdev)
{
	enum drbd_fencing_p fp;
	union drbd_state os, ns;
	int rv = SS_UNKNOWN_ERROR;
	unsigned int i;

	if (mdev->state.conn == C_STANDALONE)
		return;
	if (mdev->state.conn >= C_WF_CONNECTION)
		dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
			drbd_conn_str(mdev->state.conn));

	/* asender does not clean up anything. it must not interfere, either */
	drbd_thread_stop(&mdev->asender);
	drbd_free_sock(mdev);

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	/* make sure syncer is stopped and w_resume_next_sg queued */
	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	/* This also does reclaim_net_ee(). If we do this too early, we might
	 * miss some resync ee and pages.*/
	drbd_process_done_ee(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!is_susp(mdev->state))
		tl_clear(mdev);

	dev_info(DEV, "Connection closed\n");

	drbd_md_sync(mdev);

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
		drbd_try_outdate_peer_async(mdev);

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	if (os.conn >= C_UNCONNECTED) {
		/* Do not restart in case we are C_DISCONNECTING */
		ns = os;
		ns.conn = C_UNCONNECTED;
		rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->req_lock);

	if (os.conn == C_DISCONNECTING) {
		wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);

		crypto_free_hash(mdev->cram_hmac_tfm);
		mdev->cram_hmac_tfm = NULL;

		kfree(mdev->net_conf);
		mdev->net_conf = NULL;
		drbd_request_state(mdev, NS(conn, C_STANDALONE));
	}

	/* tcp_close and release of sendpage pages can be deferred. I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_release_ee(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
	atomic_set(&mdev->current_epoch->epoch_size, 0);
	D_ASSERT(list_empty(&mdev->current_epoch->list));
}

/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.sbuf.handshake;
	int ok;

	if (mutex_lock_interruptible(&mdev->data.mutex)) {
		dev_err(DEV, "interrupted during initial handshake\n");
		return 0; /* interrupted. not ok. */
	}

	if (mdev->data.socket == NULL) {
		mutex_unlock(&mdev->data.mutex);
		return 0;
	}

	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	ok = _drbd_send_cmd(mdev, mdev->data.socket, P_HAND_SHAKE,
			    (struct p_header80 *)p, sizeof(*p), 0);
	mutex_unlock(&mdev->data.mutex);
	return ok;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.rbuf.handshake;
	const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
	unsigned int length;
	enum drbd_packets cmd;
	int rv;

	rv = drbd_send_handshake(mdev);
	if (!rv)
		return 0;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		return 0;

	if (cmd != P_HAND_SHAKE) {
		dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
			cmdname(cmd), cmd);
		return -1;
	}

	if (length != expect) {
		dev_err(DEV, "expected HandShake length: %u, received: %u\n",
			expect, length);
		return -1;
	}

	rv = drbd_recv(mdev, &p->head.payload, expect);

	if (rv != expect) {
		dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
		return 0;
	}

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	dev_info(DEV, "Handshake successful: "
		 "Agreed network protocol version %d\n", mdev->agreed_pro_version);

	return 1;

 incompat:
	dev_err(DEV, "incompatible DRBD dialects: "
		"I support %d-%d, peer supports %d-%d\n",
		PRO_VERSION_MIN, PRO_VERSION_MAX,
		p->protocol_min, p->protocol_max);
	return -1;
}

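/* Version negotiation example: if we support PRO_VERSION_MIN..PRO_VERSION_MAX
 * of, say, 86..95 and the peer advertises 88..91, the ranges overlap and
 * agreed_pro_version becomes min(95, 91) = 91.  A peer sending
 * protocol_max == 0 is treated as supporting exactly protocol_min. */
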
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_conf *mdev)
{
	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else

#define CHALLENGE_LEN 64

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/

static int drbd_do_auth(struct drbd_conf *mdev)
{
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len = strlen(mdev->net_conf->shared_secret);
	unsigned int resp_size;
	struct hash_desc desc;
	enum drbd_packets cmd;
	unsigned int length;
	int rv;

	desc.tfm = mdev->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
				(u8 *)mdev->net_conf->shared_secret, key_len);
	if (rv) {
		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_CHALLENGE) {
		dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
			cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length > CHALLENGE_LEN * 2) {
		dev_err(DEV, "expected AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(length, GFP_NOIO);
	if (peers_ch == NULL) {
		dev_err(DEV, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	rv = drbd_recv(mdev, peers_ch, length);

	if (rv != length) {
		dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		dev_err(DEV, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, length);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_RESPONSE) {
		dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
			cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length != resp_size) {
		dev_err(DEV, "expected AuthResponse payload of wrong size\n");
		rv = 0;
		goto fail;
	}

	rv = drbd_recv(mdev, response, resp_size);

	if (rv != resp_size) {
		dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		dev_err(DEV, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
			 resp_size, mdev->net_conf->cram_hmac_alg);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif

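/* The exchange above is a symmetric challenge-response (CRAM): each side
 * sends a random challenge, answers the peer's challenge with
 * HMAC(shared_secret, peers_challenge), and verifies the peer's answer by
 * recomputing the HMAC over its own challenge.  Only challenges and digests
 * cross the wire; the shared secret itself is never transmitted. */
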
int drbdd_init(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	unsigned int minor = mdev_to_minor(mdev);
	int h;

	sprintf(current->comm, "drbd%d_receiver", minor);

	dev_info(DEV, "receiver (re)started\n");

	do {
		h = drbd_connect(mdev);
		if (h == 0) {
			drbd_disconnect(mdev);
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
		}
		if (h == -1) {
			dev_warn(DEV, "Discarding network configuration.\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	} while (h == 0);

	if (h > 0) {
		if (get_net_conf(mdev)) {
			drbdd(mdev);
			put_net_conf(mdev);
		}
	}

	drbd_disconnect(mdev);

	dev_info(DEV, "receiver terminated\n");
	return 0;
}

/* ********* acknowledge sender ******** */

static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_req_state_reply *p = (struct p_req_state_reply *)h;

	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return 1;
}

static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
{
	return drbd_send_ping_ack(mdev);
}

static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	/* restore idle timeout */
	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
		wake_up(&mdev->misc_wait);

	return 1;
}

static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	D_ASSERT(mdev->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return 1;
}

/* when we receive the ACK for a write request,
 * verify that we actually know about it */
static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = tl_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, colision) {
		if ((unsigned long)req == (unsigned long)id) {
			if (req->sector != sector) {
				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
				    "wrong sector (%llus versus %llus)\n", req,
				    (unsigned long long)req->sector,
				    (unsigned long long)sector);
				break;
			}
			return req;
		}
	}
	dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
		(void *)(unsigned long)id, (unsigned long long)sector);
	return NULL;
}

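/* The block_id carried in write packets is simply the kernel address of the
 * originating drbd_request, so an incoming ACK can be mapped back to its
 * request by pointer comparison; the sector is cross-checked to catch a
 * stale or corrupted id. */
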
typedef struct drbd_request *(req_validator_fn)
	(struct drbd_conf *mdev, u64 id, sector_t sector);

static int validate_req_change_req_state(struct drbd_conf *mdev,
	u64 id, sector_t sector, req_validator_fn validator,
	const char *func, enum drbd_req_event what)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	req = validator(mdev, id, sector);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->req_lock);
		dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
		return 0;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return 1;
}

static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return 1;
	}
	switch (be16_to_cpu(h->command)) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer_and_sis;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
		what = recv_acked_by_peer;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = conflict_discarded_by_peer;
		break;
	default:
		D_ASSERT(0);
		return 0;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, what);
}

static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	if (__ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "Got NegAck packet. Peer is in trouble?\n");

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		int size = be32_to_cpu(p->blksize);
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return 1;
	}
	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, neg_acked);
}

static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
	    (unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ar_id_to_req, __func__, neg_acked);
}
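
/* Same for a failed resync read on the peer: account the range as
 * failed resync I/O.  get_ldev_if_state(mdev, D_FAILED) presumably
 * also lets this accounting run while the local disk is already on
 * its way out. */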
static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_rs_failed_io(mdev, sector, size);
		put_ldev(mdev);
	}

	return TRUE;
}
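
/* A BarrierAck confirms the peer has processed a whole epoch, so
 * tl_release() can drop that part of the transfer log.  It is also
 * the point where, in Ahead mode with no application I/O left in
 * flight, the switch back to resync gets queued. */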
static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	if (mdev->state.conn == C_AHEAD &&
	    atomic_read(&mdev->ap_in_flight) == 0 &&
	    list_empty(&mdev->start_resync_work.list)) {
		struct drbd_work *w = &mdev->start_resync_work;
		w->cb = w_start_resync;
		drbd_queue_work_front(&mdev->data.work, w);
	}

	return TRUE;
}
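
/* One online-verify reply.  ID_OUT_OF_SYNC in block_id flags a
 * mismatch; once ov_left hits zero the verify run is finished and
 * w_ov_finished is queued (or, should that small allocation fail,
 * drbd_resync_finished() is called directly). */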
static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	if (!get_ldev(mdev))
		return TRUE;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return TRUE;
}
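
/* P_DELAY_PROBE packets (sent by protocol 93+ peers) carry nothing we
 * act on here; they are received and deliberately ignored. */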
static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	return TRUE;
}
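
/* Dispatch table for the meta-data socket thread: each entry names
 * the expected packet size, so the receive loop in drbd_asender()
 * knows how many bytes to collect before invoking the handler. */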
struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
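
/* The asender thread owns the meta-data socket.  It sends pings,
 * flushes ACKs for completed epoch entries (done_ee), and receives
 * and dispatches the peer's ACK-class packets via the table above.
 * TCP corking batches several small ACKs into one segment when there
 * is enough to send. */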
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header80);
	int empty;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
			3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);
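
		/* Receive in two steps: first collect a full p_header80, look
		 * up the command to learn the real packet size, then keep
		 * receiving until 'received' catches up with 'expect' before
		 * dispatching to cmd->process(). */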
		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS	 (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			if (mdev->meta.socket->sk->sk_rcvtimeo ==
			    mdev->net_conf->ping_timeo*HZ/10) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");