#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef	CONFIG_BLOCK
#include <linux/bio.h>
#endif	/* CONFIG_BLOCK */
#include <linux/dns_resolver.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>
#define list_entry_next(pos, member)					\
	list_entry(pos->member.next, typeof(*pos), member)
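/*
 * Illustrative use of list_entry_next() (a sketch, not messenger code):
 * given a struct with an embedded struct list_head member, it steps to
 * the element that follows.  Assuming a hypothetical "struct item" with
 * a list_head named "lru":
 *
 *	struct item *next = list_entry_next(cur, lru);
 *
 * The pagelist and data-item cursors below use this pattern to walk
 * struct page entries linked through page->lru and ceph_msg_data
 * entries linked through their "links" member.
 */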
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       | \                          \    \
 *       |  -----------                \    \
 *       |  | CLOSING |  socket event;  \    \
 *       |  -----------  await close     \    \
 *       |       ^                        \   |
 *       |       |                         \  |
 *       |       + con_sock_state_closing() \ |
 *       |      / \                         | |
 *       |     /   ---------------          | |
 *       |    /                   \         v v
 *       |   /                    --------------
 *       |  /    -----------------| CONNECTING |  socket created, TCP
 *       |  |   /                 --------------  connect initiated
 *       |  |   | con_sock_state_connected()
 *       |  |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */
#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */

/*
 * connection states
 */
#define CON_STATE_CLOSED	1	/* -> PREOPEN */
#define CON_STATE_PREOPEN	2	/* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING	3	/* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING	4	/* -> OPEN, CLOSED */
#define CON_STATE_OPEN		5	/* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY	6	/* -> PREOPEN, CLOSED */
/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX           0	/* we can close channel or drop
					 * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING 1	/* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING	   2	/* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED	   3	/* socket state changed to closed */
#define CON_FLAG_BACKOFF           4	/* need to retry queuing delayed work */
static bool con_flag_valid(unsigned long con_flag)
{
	switch (con_flag) {
	case CON_FLAG_LOSSYTX:
	case CON_FLAG_KEEPALIVE_PENDING:
	case CON_FLAG_WRITE_PENDING:
	case CON_FLAG_SOCK_CLOSED:
	case CON_FLAG_BACKOFF:
		return true;
	default:
		return false;
	}
}

static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	clear_bit(con_flag, &con->flags);
}

static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	set_bit(con_flag, &con->flags);
}

static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_clear(struct ceph_connection *con,
				    unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_clear_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_set(struct ceph_connection *con,
				  unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_set_bit(con_flag, &con->flags);
}
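/*
 * Illustrative usage (a sketch, not code from this file): because the
 * helpers above are thin wrappers around the atomic bitops, a worker
 * can consume an event flag without taking any extra lock, e.g.:
 *
 *	if (con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
 *		handle_sock_closed(con);
 *
 * where handle_sock_closed() stands in for whatever fault handling the
 * caller wants; the connection work function below uses this pattern to
 * notice that the socket was closed out from under it.  The
 * BUG_ON(!con_flag_valid(...)) checks only guard against passing a bit
 * number that is not one of the CON_FLAG_* values.
 */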
/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*ceph_msg_cache;
static struct kmem_cache	*ceph_msg_data_cache;

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif
/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024

static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void con_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;		/* used in certain error cases */
const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss->ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);
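/*
 * Example of how ceph_pr_addr() is used elsewhere in this file
 * (illustrative only):
 *
 *	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 *
 * Because the result lives in the rotating addr_str[] array rather than
 * in caller-provided storage, up to ADDR_STR_COUNT (32) results can be
 * outstanding before an earlier string is overwritten; that is what
 * "approximate reentrancy" means above.
 */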
static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}
/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;

static int ceph_msgr_slab_init(void)
{
	BUG_ON(ceph_msg_cache);
	ceph_msg_cache = kmem_cache_create("ceph_msg",
					sizeof (struct ceph_msg),
					__alignof__(struct ceph_msg), 0, NULL);

	if (!ceph_msg_cache)
		return -ENOMEM;

	BUG_ON(ceph_msg_data_cache);
	ceph_msg_data_cache = kmem_cache_create("ceph_msg_data",
					sizeof (struct ceph_msg_data),
					__alignof__(struct ceph_msg_data),
					0, NULL);
	if (ceph_msg_data_cache)
		return 0;

	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;

	return -ENOMEM;
}

static void ceph_msgr_slab_exit(void)
{
	BUG_ON(!ceph_msg_data_cache);
	kmem_cache_destroy(ceph_msg_data_cache);
	ceph_msg_data_cache = NULL;

	BUG_ON(!ceph_msg_cache);
	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;
}
static void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	ceph_msgr_slab_exit();

	BUG_ON(zero_page == NULL);
	page_cache_release(zero_page);
	zero_page = NULL;
}

int ceph_msgr_init(void)
{
	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	page_cache_get(zero_page);

	if (ceph_msgr_slab_init())
		return -ENOMEM;

	ceph_msgr_wq = alloc_workqueue("ceph-msgr",
				       WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);
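/*
 * Sketch of the expected pairing of these exported entry points
 * (illustrative only; the real callers live in the libceph module
 * init/exit paths):
 *
 *	if (ceph_msgr_init())		// slabs, zero_page, "ceph-msgr" wq
 *		return -ENOMEM;
 *	...
 *	ceph_msgr_flush();		// wait for queued con_work items
 *	ceph_msgr_exit();		// tear everything back down
 */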
/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING &&
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}
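/*
 * For reference, the transitions the helpers above trace out for a
 * well-behaved connection (a summary of the diagram near the top of
 * this file, not additional code):
 *
 *	con_sock_state_init()        NEW        -> CLOSED
 *	con_sock_state_connecting()  CLOSED     -> CONNECTING
 *	con_sock_state_connected()   CONNECTING -> CONNECTED
 *	con_sock_state_closing()     CONNECTED  -> CLOSING
 *	con_sock_state_closed()      CONNECTED/CLOSING/... -> CLOSED
 *
 * Anything else trips the WARN_ON() checks, which is how state machine
 * bugs show up in the logs.
 */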
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk, int count_unused)
{
	struct ceph_connection *con = sk->sk_user_data;
	if (atomic_read(&con->msgr->stopping)) {
		return;
	}

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("%s on %p state = %lu, queueing work\n", __func__,
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
	if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
		if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
			dout("%s %p queueing write work\n", __func__, con);
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
	} else {
		dout("%s %p nothing to write\n", __func__, con);
	}
}

/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %lu sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		con_sock_state_closing(con);
		con_flag_set(con, CON_FLAG_SOCK_CLOSED);
		queue_con(con);
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		con_sock_state_connected(con);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = con;
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}
/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
	struct socket *sock;
	unsigned int noio_flag;
	int ret;

	/* sock_create_kern() allocates with GFP_KERNEL */
	noio_flag = memalloc_noio_save();
	ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	memalloc_noio_restore(noio_flag);
	if (ret)
		return ret;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

	con_sock_state_connecting(con);
	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->error_msg = "connect error";

		return ret;
	}
	con->sock = sock;
	return 0;
}
517 static int ceph_tcp_recvmsg(struct socket
*sock
, void *buf
, size_t len
)
519 struct kvec iov
= {buf
, len
};
520 struct msghdr msg
= { .msg_flags
= MSG_DONTWAIT
| MSG_NOSIGNAL
};
523 r
= kernel_recvmsg(sock
, &msg
, &iov
, 1, len
, msg
.msg_flags
);
529 static int ceph_tcp_recvpage(struct socket
*sock
, struct page
*page
,
530 int page_offset
, size_t length
)
535 BUG_ON(page_offset
+ length
> PAGE_SIZE
);
539 ret
= ceph_tcp_recvmsg(sock
, kaddr
+ page_offset
, length
);
546 * write something. @more is true if caller will be sending more data
549 static int ceph_tcp_sendmsg(struct socket
*sock
, struct kvec
*iov
,
550 size_t kvlen
, size_t len
, int more
)
552 struct msghdr msg
= { .msg_flags
= MSG_DONTWAIT
| MSG_NOSIGNAL
};
556 msg
.msg_flags
|= MSG_MORE
;
558 msg
.msg_flags
|= MSG_EOR
; /* superfluous, but what the hell */
560 r
= kernel_sendmsg(sock
, &msg
, iov
, kvlen
, len
);
566 static int __ceph_tcp_sendpage(struct socket
*sock
, struct page
*page
,
567 int offset
, size_t size
, bool more
)
569 int flags
= MSG_DONTWAIT
| MSG_NOSIGNAL
| (more
? MSG_MORE
: MSG_EOR
);
572 ret
= kernel_sendpage(sock
, page
, offset
, size
, flags
);
579 static int ceph_tcp_sendpage(struct socket
*sock
, struct page
*page
,
580 int offset
, size_t size
, bool more
)
585 /* sendpage cannot properly handle pages with page_count == 0,
586 * we need to fallback to sendmsg if that's the case */
587 if (page_count(page
) >= 1)
588 return __ceph_tcp_sendpage(sock
, page
, offset
, size
, more
);
590 iov
.iov_base
= kmap(page
) + offset
;
592 ret
= ceph_tcp_sendmsg(sock
, &iov
, 1, size
, more
);
/*
 * Shutdown/close the socket for the given connection.
 */
601 static int con_close_socket(struct ceph_connection
*con
)
605 dout("con_close_socket on %p sock %p\n", con
, con
->sock
);
607 rc
= con
->sock
->ops
->shutdown(con
->sock
, SHUT_RDWR
);
608 sock_release(con
->sock
);
613 * Forcibly clear the SOCK_CLOSED flag. It gets set
614 * independent of the connection mutex, and we could have
615 * received a socket close event before we had the chance to
616 * shut the socket down.
618 con_flag_clear(con
, CON_FLAG_SOCK_CLOSED
);
620 con_sock_state_closed(con
);
625 * Reset a connection. Discard all incoming and outgoing messages
626 * and clear *_seq state.
628 static void ceph_msg_remove(struct ceph_msg
*msg
)
630 list_del_init(&msg
->list_head
);
631 BUG_ON(msg
->con
== NULL
);
632 msg
->con
->ops
->put(msg
->con
);
637 static void ceph_msg_remove_list(struct list_head
*head
)
639 while (!list_empty(head
)) {
640 struct ceph_msg
*msg
= list_first_entry(head
, struct ceph_msg
,
642 ceph_msg_remove(msg
);
646 static void reset_connection(struct ceph_connection
*con
)
648 /* reset connection, out_queue, msg_ and connect_seq */
649 /* discard existing out_queue and msg_seq */
650 dout("reset_connection %p\n", con
);
651 ceph_msg_remove_list(&con
->out_queue
);
652 ceph_msg_remove_list(&con
->out_sent
);
655 BUG_ON(con
->in_msg
->con
!= con
);
656 con
->in_msg
->con
= NULL
;
657 ceph_msg_put(con
->in_msg
);
662 con
->connect_seq
= 0;
665 ceph_msg_put(con
->out_msg
);
669 con
->in_seq_acked
= 0;
673 * mark a peer down. drop any open connections.
675 void ceph_con_close(struct ceph_connection
*con
)
677 mutex_lock(&con
->mutex
);
678 dout("con_close %p peer %s\n", con
,
679 ceph_pr_addr(&con
->peer_addr
.in_addr
));
680 con
->state
= CON_STATE_CLOSED
;
682 con_flag_clear(con
, CON_FLAG_LOSSYTX
); /* so we retry next connect */
683 con_flag_clear(con
, CON_FLAG_KEEPALIVE_PENDING
);
684 con_flag_clear(con
, CON_FLAG_WRITE_PENDING
);
685 con_flag_clear(con
, CON_FLAG_BACKOFF
);
687 reset_connection(con
);
688 con
->peer_global_seq
= 0;
689 cancel_delayed_work(&con
->work
);
690 con_close_socket(con
);
691 mutex_unlock(&con
->mutex
);
693 EXPORT_SYMBOL(ceph_con_close
);
696 * Reopen a closed connection, with a new peer address.
698 void ceph_con_open(struct ceph_connection
*con
,
699 __u8 entity_type
, __u64 entity_num
,
700 struct ceph_entity_addr
*addr
)
702 mutex_lock(&con
->mutex
);
703 dout("con_open %p %s\n", con
, ceph_pr_addr(&addr
->in_addr
));
705 WARN_ON(con
->state
!= CON_STATE_CLOSED
);
706 con
->state
= CON_STATE_PREOPEN
;
708 con
->peer_name
.type
= (__u8
) entity_type
;
709 con
->peer_name
.num
= cpu_to_le64(entity_num
);
711 memcpy(&con
->peer_addr
, addr
, sizeof(*addr
));
712 con
->delay
= 0; /* reset backoff memory */
713 mutex_unlock(&con
->mutex
);
716 EXPORT_SYMBOL(ceph_con_open
);
719 * return true if this connection ever successfully opened
721 bool ceph_con_opened(struct ceph_connection
*con
)
723 return con
->connect_seq
> 0;
727 * initialize a new connection.
729 void ceph_con_init(struct ceph_connection
*con
, void *private,
730 const struct ceph_connection_operations
*ops
,
731 struct ceph_messenger
*msgr
)
733 dout("con_init %p\n", con
);
734 memset(con
, 0, sizeof(*con
));
735 con
->private = private;
739 con_sock_state_init(con
);
741 mutex_init(&con
->mutex
);
742 INIT_LIST_HEAD(&con
->out_queue
);
743 INIT_LIST_HEAD(&con
->out_sent
);
744 INIT_DELAYED_WORK(&con
->work
, con_work
);
746 con
->state
= CON_STATE_CLOSED
;
748 EXPORT_SYMBOL(ceph_con_init
);
752 * We maintain a global counter to order connection attempts. Get
753 * a unique seq greater than @gt.
755 static u32
get_global_seq(struct ceph_messenger
*msgr
, u32 gt
)
759 spin_lock(&msgr
->global_seq_lock
);
760 if (msgr
->global_seq
< gt
)
761 msgr
->global_seq
= gt
;
762 ret
= ++msgr
->global_seq
;
763 spin_unlock(&msgr
->global_seq_lock
);
767 static void con_out_kvec_reset(struct ceph_connection
*con
)
769 con
->out_kvec_left
= 0;
770 con
->out_kvec_bytes
= 0;
771 con
->out_kvec_cur
= &con
->out_kvec
[0];
774 static void con_out_kvec_add(struct ceph_connection
*con
,
775 size_t size
, void *data
)
779 index
= con
->out_kvec_left
;
780 BUG_ON(index
>= ARRAY_SIZE(con
->out_kvec
));
782 con
->out_kvec
[index
].iov_len
= size
;
783 con
->out_kvec
[index
].iov_base
= data
;
784 con
->out_kvec_left
++;
785 con
->out_kvec_bytes
+= size
;
/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
795 static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor
*cursor
,
798 struct ceph_msg_data
*data
= cursor
->data
;
801 BUG_ON(data
->type
!= CEPH_MSG_DATA_BIO
);
805 BUG_ON(!bio
->bi_vcnt
);
807 cursor
->resid
= min(length
, data
->bio_length
);
809 cursor
->vector_index
= 0;
810 cursor
->vector_offset
= 0;
811 cursor
->last_piece
= length
<= bio
->bi_io_vec
[0].bv_len
;
814 static struct page
*ceph_msg_data_bio_next(struct ceph_msg_data_cursor
*cursor
,
818 struct ceph_msg_data
*data
= cursor
->data
;
820 struct bio_vec
*bio_vec
;
823 BUG_ON(data
->type
!= CEPH_MSG_DATA_BIO
);
828 index
= cursor
->vector_index
;
829 BUG_ON(index
>= (unsigned int) bio
->bi_vcnt
);
831 bio_vec
= &bio
->bi_io_vec
[index
];
832 BUG_ON(cursor
->vector_offset
>= bio_vec
->bv_len
);
833 *page_offset
= (size_t) (bio_vec
->bv_offset
+ cursor
->vector_offset
);
834 BUG_ON(*page_offset
>= PAGE_SIZE
);
835 if (cursor
->last_piece
) /* pagelist offset is always 0 */
836 *length
= cursor
->resid
;
838 *length
= (size_t) (bio_vec
->bv_len
- cursor
->vector_offset
);
839 BUG_ON(*length
> cursor
->resid
);
840 BUG_ON(*page_offset
+ *length
> PAGE_SIZE
);
842 return bio_vec
->bv_page
;
845 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor
*cursor
,
849 struct bio_vec
*bio_vec
;
852 BUG_ON(cursor
->data
->type
!= CEPH_MSG_DATA_BIO
);
857 index
= cursor
->vector_index
;
858 BUG_ON(index
>= (unsigned int) bio
->bi_vcnt
);
859 bio_vec
= &bio
->bi_io_vec
[index
];
861 /* Advance the cursor offset */
863 BUG_ON(cursor
->resid
< bytes
);
864 cursor
->resid
-= bytes
;
865 cursor
->vector_offset
+= bytes
;
866 if (cursor
->vector_offset
< bio_vec
->bv_len
)
867 return false; /* more bytes to process in this segment */
868 BUG_ON(cursor
->vector_offset
!= bio_vec
->bv_len
);
870 /* Move on to the next segment, and possibly the next bio */
872 if (++index
== (unsigned int) bio
->bi_vcnt
) {
877 cursor
->vector_index
= index
;
878 cursor
->vector_offset
= 0;
880 if (!cursor
->last_piece
) {
881 BUG_ON(!cursor
->resid
);
883 /* A short read is OK, so use <= rather than == */
884 if (cursor
->resid
<= bio
->bi_io_vec
[index
].bv_len
)
885 cursor
->last_piece
= true;
890 #endif /* CONFIG_BLOCK */
/*
 * For a page array, a piece comes from the first page in the array
 * that has not already been fully consumed.
 */
896 static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor
*cursor
,
899 struct ceph_msg_data
*data
= cursor
->data
;
902 BUG_ON(data
->type
!= CEPH_MSG_DATA_PAGES
);
904 BUG_ON(!data
->pages
);
905 BUG_ON(!data
->length
);
907 cursor
->resid
= min(length
, data
->length
);
908 page_count
= calc_pages_for(data
->alignment
, (u64
)data
->length
);
909 cursor
->page_offset
= data
->alignment
& ~PAGE_MASK
;
910 cursor
->page_index
= 0;
911 BUG_ON(page_count
> (int)USHRT_MAX
);
912 cursor
->page_count
= (unsigned short)page_count
;
913 BUG_ON(length
> SIZE_MAX
- cursor
->page_offset
);
914 cursor
->last_piece
= cursor
->page_offset
+ cursor
->resid
<= PAGE_SIZE
;
918 ceph_msg_data_pages_next(struct ceph_msg_data_cursor
*cursor
,
919 size_t *page_offset
, size_t *length
)
921 struct ceph_msg_data
*data
= cursor
->data
;
923 BUG_ON(data
->type
!= CEPH_MSG_DATA_PAGES
);
925 BUG_ON(cursor
->page_index
>= cursor
->page_count
);
926 BUG_ON(cursor
->page_offset
>= PAGE_SIZE
);
928 *page_offset
= cursor
->page_offset
;
929 if (cursor
->last_piece
)
930 *length
= cursor
->resid
;
932 *length
= PAGE_SIZE
- *page_offset
;
934 return data
->pages
[cursor
->page_index
];
937 static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor
*cursor
,
940 BUG_ON(cursor
->data
->type
!= CEPH_MSG_DATA_PAGES
);
942 BUG_ON(cursor
->page_offset
+ bytes
> PAGE_SIZE
);
944 /* Advance the cursor page offset */
946 cursor
->resid
-= bytes
;
947 cursor
->page_offset
= (cursor
->page_offset
+ bytes
) & ~PAGE_MASK
;
948 if (!bytes
|| cursor
->page_offset
)
949 return false; /* more bytes to process in the current page */
951 /* Move on to the next page; offset is already at 0 */
953 BUG_ON(cursor
->page_index
>= cursor
->page_count
);
954 cursor
->page_index
++;
955 cursor
->last_piece
= cursor
->resid
<= PAGE_SIZE
;
/*
 * For a pagelist, a piece is whatever remains to be consumed in the
 * first page in the list, or the front of the next page.
 */
965 ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor
*cursor
,
968 struct ceph_msg_data
*data
= cursor
->data
;
969 struct ceph_pagelist
*pagelist
;
972 BUG_ON(data
->type
!= CEPH_MSG_DATA_PAGELIST
);
974 pagelist
= data
->pagelist
;
978 return; /* pagelist can be assigned but empty */
980 BUG_ON(list_empty(&pagelist
->head
));
981 page
= list_first_entry(&pagelist
->head
, struct page
, lru
);
983 cursor
->resid
= min(length
, pagelist
->length
);
986 cursor
->last_piece
= cursor
->resid
<= PAGE_SIZE
;
990 ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor
*cursor
,
991 size_t *page_offset
, size_t *length
)
993 struct ceph_msg_data
*data
= cursor
->data
;
994 struct ceph_pagelist
*pagelist
;
996 BUG_ON(data
->type
!= CEPH_MSG_DATA_PAGELIST
);
998 pagelist
= data
->pagelist
;
1001 BUG_ON(!cursor
->page
);
1002 BUG_ON(cursor
->offset
+ cursor
->resid
!= pagelist
->length
);
1004 /* offset of first page in pagelist is always 0 */
1005 *page_offset
= cursor
->offset
& ~PAGE_MASK
;
1006 if (cursor
->last_piece
)
1007 *length
= cursor
->resid
;
1009 *length
= PAGE_SIZE
- *page_offset
;
1011 return cursor
->page
;
1014 static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor
*cursor
,
1017 struct ceph_msg_data
*data
= cursor
->data
;
1018 struct ceph_pagelist
*pagelist
;
1020 BUG_ON(data
->type
!= CEPH_MSG_DATA_PAGELIST
);
1022 pagelist
= data
->pagelist
;
1025 BUG_ON(cursor
->offset
+ cursor
->resid
!= pagelist
->length
);
1026 BUG_ON((cursor
->offset
& ~PAGE_MASK
) + bytes
> PAGE_SIZE
);
1028 /* Advance the cursor offset */
1030 cursor
->resid
-= bytes
;
1031 cursor
->offset
+= bytes
;
1032 /* offset of first page in pagelist is always 0 */
1033 if (!bytes
|| cursor
->offset
& ~PAGE_MASK
)
1034 return false; /* more bytes to process in the current page */
1036 /* Move on to the next page */
1038 BUG_ON(list_is_last(&cursor
->page
->lru
, &pagelist
->head
));
1039 cursor
->page
= list_entry_next(cursor
->page
, lru
);
1040 cursor
->last_piece
= cursor
->resid
<= PAGE_SIZE
;
/*
 * Message data is handled (sent or received) in pieces, where each
 * piece resides on a single page.  The network layer might not
 * consume an entire piece at once.  A data item's cursor keeps
 * track of which piece is next to process and how much remains to
 * be processed in that piece.  It also tracks whether the current
 * piece is the last one in the data item.
 */
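/*
 * A minimal sketch of the consumer loop this implies (illustrative
 * only; the real loops are write_partial_message_data() and
 * read_partial_msg_data() further down):
 *
 *	while (cursor->resid) {
 *		page = ceph_msg_data_next(cursor, &off, &len, &last);
 *		n = do_io(page, off, len);
 *		ceph_msg_data_advance(cursor, n);
 *	}
 *
 * where do_io() stands in for the sendpage/recvpage call and n is
 * however many bytes the network layer actually accepted or returned.
 */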
1053 static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor
*cursor
)
1055 size_t length
= cursor
->total_resid
;
1057 switch (cursor
->data
->type
) {
1058 case CEPH_MSG_DATA_PAGELIST
:
1059 ceph_msg_data_pagelist_cursor_init(cursor
, length
);
1061 case CEPH_MSG_DATA_PAGES
:
1062 ceph_msg_data_pages_cursor_init(cursor
, length
);
1065 case CEPH_MSG_DATA_BIO
:
1066 ceph_msg_data_bio_cursor_init(cursor
, length
);
1068 #endif /* CONFIG_BLOCK */
1069 case CEPH_MSG_DATA_NONE
:
1074 cursor
->need_crc
= true;
1077 static void ceph_msg_data_cursor_init(struct ceph_msg
*msg
, size_t length
)
1079 struct ceph_msg_data_cursor
*cursor
= &msg
->cursor
;
1080 struct ceph_msg_data
*data
;
1083 BUG_ON(length
> msg
->data_length
);
1084 BUG_ON(list_empty(&msg
->data
));
1086 cursor
->data_head
= &msg
->data
;
1087 cursor
->total_resid
= length
;
1088 data
= list_first_entry(&msg
->data
, struct ceph_msg_data
, links
);
1089 cursor
->data
= data
;
1091 __ceph_msg_data_cursor_init(cursor
);
1095 * Return the page containing the next piece to process for a given
1096 * data item, and supply the page offset and length of that piece.
1097 * Indicate whether this is the last piece in this data item.
1099 static struct page
*ceph_msg_data_next(struct ceph_msg_data_cursor
*cursor
,
1100 size_t *page_offset
, size_t *length
,
1105 switch (cursor
->data
->type
) {
1106 case CEPH_MSG_DATA_PAGELIST
:
1107 page
= ceph_msg_data_pagelist_next(cursor
, page_offset
, length
);
1109 case CEPH_MSG_DATA_PAGES
:
1110 page
= ceph_msg_data_pages_next(cursor
, page_offset
, length
);
1113 case CEPH_MSG_DATA_BIO
:
1114 page
= ceph_msg_data_bio_next(cursor
, page_offset
, length
);
1116 #endif /* CONFIG_BLOCK */
1117 case CEPH_MSG_DATA_NONE
:
1123 BUG_ON(*page_offset
+ *length
> PAGE_SIZE
);
1126 *last_piece
= cursor
->last_piece
;
/*
 * Returns true if the result moves the cursor on to the next piece
 * of the data item.
 */
1135 static bool ceph_msg_data_advance(struct ceph_msg_data_cursor
*cursor
,
1140 BUG_ON(bytes
> cursor
->resid
);
1141 switch (cursor
->data
->type
) {
1142 case CEPH_MSG_DATA_PAGELIST
:
1143 new_piece
= ceph_msg_data_pagelist_advance(cursor
, bytes
);
1145 case CEPH_MSG_DATA_PAGES
:
1146 new_piece
= ceph_msg_data_pages_advance(cursor
, bytes
);
1149 case CEPH_MSG_DATA_BIO
:
1150 new_piece
= ceph_msg_data_bio_advance(cursor
, bytes
);
1152 #endif /* CONFIG_BLOCK */
1153 case CEPH_MSG_DATA_NONE
:
1158 cursor
->total_resid
-= bytes
;
1160 if (!cursor
->resid
&& cursor
->total_resid
) {
1161 WARN_ON(!cursor
->last_piece
);
1162 BUG_ON(list_is_last(&cursor
->data
->links
, cursor
->data_head
));
1163 cursor
->data
= list_entry_next(cursor
->data
, links
);
1164 __ceph_msg_data_cursor_init(cursor
);
1167 cursor
->need_crc
= new_piece
;
1172 static void prepare_message_data(struct ceph_msg
*msg
, u32 data_len
)
1177 /* Initialize data cursor */
1179 ceph_msg_data_cursor_init(msg
, (size_t)data_len
);
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
1186 static void prepare_write_message_footer(struct ceph_connection
*con
)
1188 struct ceph_msg
*m
= con
->out_msg
;
1189 int v
= con
->out_kvec_left
;
1191 m
->footer
.flags
|= CEPH_MSG_FOOTER_COMPLETE
;
1193 dout("prepare_write_message_footer %p\n", con
);
1194 con
->out_kvec_is_msg
= true;
1195 con
->out_kvec
[v
].iov_base
= &m
->footer
;
1196 con
->out_kvec
[v
].iov_len
= sizeof(m
->footer
);
1197 con
->out_kvec_bytes
+= sizeof(m
->footer
);
1198 con
->out_kvec_left
++;
1199 con
->out_more
= m
->more_to_follow
;
1200 con
->out_msg_done
= true;
1204 * Prepare headers for the next outgoing message.
1206 static void prepare_write_message(struct ceph_connection
*con
)
1211 con_out_kvec_reset(con
);
1212 con
->out_kvec_is_msg
= true;
1213 con
->out_msg_done
= false;
1215 /* Sneak an ack in there first? If we can get it into the same
1216 * TCP packet that's a good thing. */
1217 if (con
->in_seq
> con
->in_seq_acked
) {
1218 con
->in_seq_acked
= con
->in_seq
;
1219 con_out_kvec_add(con
, sizeof (tag_ack
), &tag_ack
);
1220 con
->out_temp_ack
= cpu_to_le64(con
->in_seq_acked
);
1221 con_out_kvec_add(con
, sizeof (con
->out_temp_ack
),
1222 &con
->out_temp_ack
);
1225 BUG_ON(list_empty(&con
->out_queue
));
1226 m
= list_first_entry(&con
->out_queue
, struct ceph_msg
, list_head
);
1228 BUG_ON(m
->con
!= con
);
1230 /* put message on sent list */
1232 list_move_tail(&m
->list_head
, &con
->out_sent
);
1235 * only assign outgoing seq # if we haven't sent this message
1236 * yet. if it is requeued, resend with it's original seq.
1238 if (m
->needs_out_seq
) {
1239 m
->hdr
.seq
= cpu_to_le64(++con
->out_seq
);
1240 m
->needs_out_seq
= false;
1242 WARN_ON(m
->data_length
!= le32_to_cpu(m
->hdr
.data_len
));
1244 dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
1245 m
, con
->out_seq
, le16_to_cpu(m
->hdr
.type
),
1246 le32_to_cpu(m
->hdr
.front_len
), le32_to_cpu(m
->hdr
.middle_len
),
1248 BUG_ON(le32_to_cpu(m
->hdr
.front_len
) != m
->front
.iov_len
);
1250 /* tag + hdr + front + middle */
1251 con_out_kvec_add(con
, sizeof (tag_msg
), &tag_msg
);
1252 con_out_kvec_add(con
, sizeof (m
->hdr
), &m
->hdr
);
1253 con_out_kvec_add(con
, m
->front
.iov_len
, m
->front
.iov_base
);
1256 con_out_kvec_add(con
, m
->middle
->vec
.iov_len
,
1257 m
->middle
->vec
.iov_base
);
1259 /* fill in crc (except data pages), footer */
1260 crc
= crc32c(0, &m
->hdr
, offsetof(struct ceph_msg_header
, crc
));
1261 con
->out_msg
->hdr
.crc
= cpu_to_le32(crc
);
1262 con
->out_msg
->footer
.flags
= 0;
1264 crc
= crc32c(0, m
->front
.iov_base
, m
->front
.iov_len
);
1265 con
->out_msg
->footer
.front_crc
= cpu_to_le32(crc
);
1267 crc
= crc32c(0, m
->middle
->vec
.iov_base
,
1268 m
->middle
->vec
.iov_len
);
1269 con
->out_msg
->footer
.middle_crc
= cpu_to_le32(crc
);
1271 con
->out_msg
->footer
.middle_crc
= 0;
1272 dout("%s front_crc %u middle_crc %u\n", __func__
,
1273 le32_to_cpu(con
->out_msg
->footer
.front_crc
),
1274 le32_to_cpu(con
->out_msg
->footer
.middle_crc
));
1276 /* is there a data payload? */
1277 con
->out_msg
->footer
.data_crc
= 0;
1278 if (m
->data_length
) {
1279 prepare_message_data(con
->out_msg
, m
->data_length
);
1280 con
->out_more
= 1; /* data + footer will follow */
1282 /* no, queue up footer too and be done */
1283 prepare_write_message_footer(con
);
1286 con_flag_set(con
, CON_FLAG_WRITE_PENDING
);
1292 static void prepare_write_ack(struct ceph_connection
*con
)
1294 dout("prepare_write_ack %p %llu -> %llu\n", con
,
1295 con
->in_seq_acked
, con
->in_seq
);
1296 con
->in_seq_acked
= con
->in_seq
;
1298 con_out_kvec_reset(con
);
1300 con_out_kvec_add(con
, sizeof (tag_ack
), &tag_ack
);
1302 con
->out_temp_ack
= cpu_to_le64(con
->in_seq_acked
);
1303 con_out_kvec_add(con
, sizeof (con
->out_temp_ack
),
1304 &con
->out_temp_ack
);
1306 con
->out_more
= 1; /* more will follow.. eventually.. */
1307 con_flag_set(con
, CON_FLAG_WRITE_PENDING
);
1311 * Prepare to share the seq during handshake
1313 static void prepare_write_seq(struct ceph_connection
*con
)
1315 dout("prepare_write_seq %p %llu -> %llu\n", con
,
1316 con
->in_seq_acked
, con
->in_seq
);
1317 con
->in_seq_acked
= con
->in_seq
;
1319 con_out_kvec_reset(con
);
1321 con
->out_temp_ack
= cpu_to_le64(con
->in_seq_acked
);
1322 con_out_kvec_add(con
, sizeof (con
->out_temp_ack
),
1323 &con
->out_temp_ack
);
1325 con_flag_set(con
, CON_FLAG_WRITE_PENDING
);
1329 * Prepare to write keepalive byte.
1331 static void prepare_write_keepalive(struct ceph_connection
*con
)
1333 dout("prepare_write_keepalive %p\n", con
);
1334 con_out_kvec_reset(con
);
1335 con_out_kvec_add(con
, sizeof (tag_keepalive
), &tag_keepalive
);
1336 con_flag_set(con
, CON_FLAG_WRITE_PENDING
);
1340 * Connection negotiation.
1343 static struct ceph_auth_handshake
*get_connect_authorizer(struct ceph_connection
*con
,
1346 struct ceph_auth_handshake
*auth
;
1348 if (!con
->ops
->get_authorizer
) {
1349 con
->out_connect
.authorizer_protocol
= CEPH_AUTH_UNKNOWN
;
1350 con
->out_connect
.authorizer_len
= 0;
1354 /* Can't hold the mutex while getting authorizer */
1355 mutex_unlock(&con
->mutex
);
1356 auth
= con
->ops
->get_authorizer(con
, auth_proto
, con
->auth_retry
);
1357 mutex_lock(&con
->mutex
);
1361 if (con
->state
!= CON_STATE_NEGOTIATING
)
1362 return ERR_PTR(-EAGAIN
);
1364 con
->auth_reply_buf
= auth
->authorizer_reply_buf
;
1365 con
->auth_reply_buf_len
= auth
->authorizer_reply_buf_len
;
1370 * We connected to a peer and are saying hello.
1372 static void prepare_write_banner(struct ceph_connection
*con
)
1374 con_out_kvec_add(con
, strlen(CEPH_BANNER
), CEPH_BANNER
);
1375 con_out_kvec_add(con
, sizeof (con
->msgr
->my_enc_addr
),
1376 &con
->msgr
->my_enc_addr
);
1379 con_flag_set(con
, CON_FLAG_WRITE_PENDING
);
1382 static int prepare_write_connect(struct ceph_connection
*con
)
1384 unsigned int global_seq
= get_global_seq(con
->msgr
, 0);
1387 struct ceph_auth_handshake
*auth
;
1389 switch (con
->peer_name
.type
) {
1390 case CEPH_ENTITY_TYPE_MON
:
1391 proto
= CEPH_MONC_PROTOCOL
;
1393 case CEPH_ENTITY_TYPE_OSD
:
1394 proto
= CEPH_OSDC_PROTOCOL
;
1396 case CEPH_ENTITY_TYPE_MDS
:
1397 proto
= CEPH_MDSC_PROTOCOL
;
1403 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con
,
1404 con
->connect_seq
, global_seq
, proto
);
1406 con
->out_connect
.features
= cpu_to_le64(con
->msgr
->supported_features
);
1407 con
->out_connect
.host_type
= cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT
);
1408 con
->out_connect
.connect_seq
= cpu_to_le32(con
->connect_seq
);
1409 con
->out_connect
.global_seq
= cpu_to_le32(global_seq
);
1410 con
->out_connect
.protocol_version
= cpu_to_le32(proto
);
1411 con
->out_connect
.flags
= 0;
1413 auth_proto
= CEPH_AUTH_UNKNOWN
;
1414 auth
= get_connect_authorizer(con
, &auth_proto
);
1416 return PTR_ERR(auth
);
1418 con
->out_connect
.authorizer_protocol
= cpu_to_le32(auth_proto
);
1419 con
->out_connect
.authorizer_len
= auth
?
1420 cpu_to_le32(auth
->authorizer_buf_len
) : 0;
1422 con_out_kvec_add(con
, sizeof (con
->out_connect
),
1424 if (auth
&& auth
->authorizer_buf_len
)
1425 con_out_kvec_add(con
, auth
->authorizer_buf_len
,
1426 auth
->authorizer_buf
);
1429 con_flag_set(con
, CON_FLAG_WRITE_PENDING
);
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
1440 static int write_partial_kvec(struct ceph_connection
*con
)
1444 dout("write_partial_kvec %p %d left\n", con
, con
->out_kvec_bytes
);
1445 while (con
->out_kvec_bytes
> 0) {
1446 ret
= ceph_tcp_sendmsg(con
->sock
, con
->out_kvec_cur
,
1447 con
->out_kvec_left
, con
->out_kvec_bytes
,
1451 con
->out_kvec_bytes
-= ret
;
1452 if (con
->out_kvec_bytes
== 0)
1455 /* account for full iov entries consumed */
1456 while (ret
>= con
->out_kvec_cur
->iov_len
) {
1457 BUG_ON(!con
->out_kvec_left
);
1458 ret
-= con
->out_kvec_cur
->iov_len
;
1459 con
->out_kvec_cur
++;
1460 con
->out_kvec_left
--;
1462 /* and for a partially-consumed entry */
1464 con
->out_kvec_cur
->iov_len
-= ret
;
1465 con
->out_kvec_cur
->iov_base
+= ret
;
1468 con
->out_kvec_left
= 0;
1469 con
->out_kvec_is_msg
= false;
1472 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con
,
1473 con
->out_kvec_bytes
, con
->out_kvec_left
, ret
);
1474 return ret
; /* done! */
1477 static u32
ceph_crc32c_page(u32 crc
, struct page
*page
,
1478 unsigned int page_offset
,
1479 unsigned int length
)
1484 BUG_ON(kaddr
== NULL
);
1485 crc
= crc32c(crc
, kaddr
+ page_offset
, length
);
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
1497 static int write_partial_message_data(struct ceph_connection
*con
)
1499 struct ceph_msg
*msg
= con
->out_msg
;
1500 struct ceph_msg_data_cursor
*cursor
= &msg
->cursor
;
1501 bool do_datacrc
= !con
->msgr
->nocrc
;
1504 dout("%s %p msg %p\n", __func__
, con
, msg
);
1506 if (list_empty(&msg
->data
))
1510 * Iterate through each page that contains data to be
1511 * written, and send as much as possible for each.
1513 * If we are calculating the data crc (the default), we will
1514 * need to map the page. If we have no pages, they have
1515 * been revoked, so use the zero page.
1517 crc
= do_datacrc
? le32_to_cpu(msg
->footer
.data_crc
) : 0;
1518 while (cursor
->resid
) {
1526 page
= ceph_msg_data_next(&msg
->cursor
, &page_offset
, &length
,
1528 ret
= ceph_tcp_sendpage(con
->sock
, page
, page_offset
,
1529 length
, last_piece
);
1532 msg
->footer
.data_crc
= cpu_to_le32(crc
);
1536 if (do_datacrc
&& cursor
->need_crc
)
1537 crc
= ceph_crc32c_page(crc
, page
, page_offset
, length
);
1538 need_crc
= ceph_msg_data_advance(&msg
->cursor
, (size_t)ret
);
1541 dout("%s %p msg %p done\n", __func__
, con
, msg
);
1543 /* prepare and queue up footer, too */
1545 msg
->footer
.data_crc
= cpu_to_le32(crc
);
1547 msg
->footer
.flags
|= CEPH_MSG_FOOTER_NOCRC
;
1548 con_out_kvec_reset(con
);
1549 prepare_write_message_footer(con
);
1551 return 1; /* must return > 0 to indicate success */
1557 static int write_partial_skip(struct ceph_connection
*con
)
1561 while (con
->out_skip
> 0) {
1562 size_t size
= min(con
->out_skip
, (int) PAGE_CACHE_SIZE
);
1564 ret
= ceph_tcp_sendpage(con
->sock
, zero_page
, 0, size
, true);
1567 con
->out_skip
-= ret
;
1575 * Prepare to read connection handshake, or an ack.
1577 static void prepare_read_banner(struct ceph_connection
*con
)
1579 dout("prepare_read_banner %p\n", con
);
1580 con
->in_base_pos
= 0;
1583 static void prepare_read_connect(struct ceph_connection
*con
)
1585 dout("prepare_read_connect %p\n", con
);
1586 con
->in_base_pos
= 0;
1589 static void prepare_read_ack(struct ceph_connection
*con
)
1591 dout("prepare_read_ack %p\n", con
);
1592 con
->in_base_pos
= 0;
1595 static void prepare_read_seq(struct ceph_connection
*con
)
1597 dout("prepare_read_seq %p\n", con
);
1598 con
->in_base_pos
= 0;
1599 con
->in_tag
= CEPH_MSGR_TAG_SEQ
;
1602 static void prepare_read_tag(struct ceph_connection
*con
)
1604 dout("prepare_read_tag %p\n", con
);
1605 con
->in_base_pos
= 0;
1606 con
->in_tag
= CEPH_MSGR_TAG_READY
;
1610 * Prepare to read a message.
1612 static int prepare_read_message(struct ceph_connection
*con
)
1614 dout("prepare_read_message %p\n", con
);
1615 BUG_ON(con
->in_msg
!= NULL
);
1616 con
->in_base_pos
= 0;
1617 con
->in_front_crc
= con
->in_middle_crc
= con
->in_data_crc
= 0;
1622 static int read_partial(struct ceph_connection
*con
,
1623 int end
, int size
, void *object
)
1625 while (con
->in_base_pos
< end
) {
1626 int left
= end
- con
->in_base_pos
;
1627 int have
= size
- left
;
1628 int ret
= ceph_tcp_recvmsg(con
->sock
, object
+ have
, left
);
1631 con
->in_base_pos
+= ret
;
1638 * Read all or part of the connect-side handshake on a new connection
1640 static int read_partial_banner(struct ceph_connection
*con
)
1646 dout("read_partial_banner %p at %d\n", con
, con
->in_base_pos
);
1649 size
= strlen(CEPH_BANNER
);
1651 ret
= read_partial(con
, end
, size
, con
->in_banner
);
1655 size
= sizeof (con
->actual_peer_addr
);
1657 ret
= read_partial(con
, end
, size
, &con
->actual_peer_addr
);
1661 size
= sizeof (con
->peer_addr_for_me
);
1663 ret
= read_partial(con
, end
, size
, &con
->peer_addr_for_me
);
1671 static int read_partial_connect(struct ceph_connection
*con
)
1677 dout("read_partial_connect %p at %d\n", con
, con
->in_base_pos
);
1679 size
= sizeof (con
->in_reply
);
1681 ret
= read_partial(con
, end
, size
, &con
->in_reply
);
1685 size
= le32_to_cpu(con
->in_reply
.authorizer_len
);
1687 ret
= read_partial(con
, end
, size
, con
->auth_reply_buf
);
1691 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1692 con
, (int)con
->in_reply
.tag
,
1693 le32_to_cpu(con
->in_reply
.connect_seq
),
1694 le32_to_cpu(con
->in_reply
.global_seq
));
1701 * Verify the hello banner looks okay.
1703 static int verify_hello(struct ceph_connection
*con
)
1705 if (memcmp(con
->in_banner
, CEPH_BANNER
, strlen(CEPH_BANNER
))) {
1706 pr_err("connect to %s got bad banner\n",
1707 ceph_pr_addr(&con
->peer_addr
.in_addr
));
1708 con
->error_msg
= "protocol error, bad banner";
1714 static bool addr_is_blank(struct sockaddr_storage
*ss
)
1716 switch (ss
->ss_family
) {
1718 return ((struct sockaddr_in
*)ss
)->sin_addr
.s_addr
== 0;
1721 ((struct sockaddr_in6
*)ss
)->sin6_addr
.s6_addr32
[0] == 0 &&
1722 ((struct sockaddr_in6
*)ss
)->sin6_addr
.s6_addr32
[1] == 0 &&
1723 ((struct sockaddr_in6
*)ss
)->sin6_addr
.s6_addr32
[2] == 0 &&
1724 ((struct sockaddr_in6
*)ss
)->sin6_addr
.s6_addr32
[3] == 0;
1729 static int addr_port(struct sockaddr_storage
*ss
)
1731 switch (ss
->ss_family
) {
1733 return ntohs(((struct sockaddr_in
*)ss
)->sin_port
);
1735 return ntohs(((struct sockaddr_in6
*)ss
)->sin6_port
);
1740 static void addr_set_port(struct sockaddr_storage
*ss
, int p
)
1742 switch (ss
->ss_family
) {
1744 ((struct sockaddr_in
*)ss
)->sin_port
= htons(p
);
1747 ((struct sockaddr_in6
*)ss
)->sin6_port
= htons(p
);
1753 * Unlike other *_pton function semantics, zero indicates success.
1755 static int ceph_pton(const char *str
, size_t len
, struct sockaddr_storage
*ss
,
1756 char delim
, const char **ipend
)
1758 struct sockaddr_in
*in4
= (struct sockaddr_in
*) ss
;
1759 struct sockaddr_in6
*in6
= (struct sockaddr_in6
*) ss
;
1761 memset(ss
, 0, sizeof(*ss
));
1763 if (in4_pton(str
, len
, (u8
*)&in4
->sin_addr
.s_addr
, delim
, ipend
)) {
1764 ss
->ss_family
= AF_INET
;
1768 if (in6_pton(str
, len
, (u8
*)&in6
->sin6_addr
.s6_addr
, delim
, ipend
)) {
1769 ss
->ss_family
= AF_INET6
;
1777 * Extract hostname string and resolve using kernel DNS facility.
1779 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1780 static int ceph_dns_resolve_name(const char *name
, size_t namelen
,
1781 struct sockaddr_storage
*ss
, char delim
, const char **ipend
)
1783 const char *end
, *delim_p
;
1784 char *colon_p
, *ip_addr
= NULL
;
1788 * The end of the hostname occurs immediately preceding the delimiter or
1789 * the port marker (':') where the delimiter takes precedence.
1791 delim_p
= memchr(name
, delim
, namelen
);
1792 colon_p
= memchr(name
, ':', namelen
);
1794 if (delim_p
&& colon_p
)
1795 end
= delim_p
< colon_p
? delim_p
: colon_p
;
1796 else if (!delim_p
&& colon_p
)
1800 if (!end
) /* case: hostname:/ */
1801 end
= name
+ namelen
;
1807 /* do dns_resolve upcall */
1808 ip_len
= dns_query(NULL
, name
, end
- name
, NULL
, &ip_addr
, NULL
);
1810 ret
= ceph_pton(ip_addr
, ip_len
, ss
, -1, NULL
);
1818 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end
- name
), name
,
1819 ret
, ret
? "failed" : ceph_pr_addr(ss
));
1824 static inline int ceph_dns_resolve_name(const char *name
, size_t namelen
,
1825 struct sockaddr_storage
*ss
, char delim
, const char **ipend
)
1832 * Parse a server name (IP or hostname). If a valid IP address is not found
1833 * then try to extract a hostname to resolve using userspace DNS upcall.
1835 static int ceph_parse_server_name(const char *name
, size_t namelen
,
1836 struct sockaddr_storage
*ss
, char delim
, const char **ipend
)
1840 ret
= ceph_pton(name
, namelen
, ss
, delim
, ipend
);
1842 ret
= ceph_dns_resolve_name(name
, namelen
, ss
, delim
, ipend
);
1848 * Parse an ip[:port] list into an addr array. Use the default
1849 * monitor port if a port isn't specified.
1851 int ceph_parse_ips(const char *c
, const char *end
,
1852 struct ceph_entity_addr
*addr
,
1853 int max_count
, int *count
)
1855 int i
, ret
= -EINVAL
;
1858 dout("parse_ips on '%.*s'\n", (int)(end
-c
), c
);
1859 for (i
= 0; i
< max_count
; i
++) {
1861 struct sockaddr_storage
*ss
= &addr
[i
].in_addr
;
1870 ret
= ceph_parse_server_name(p
, end
- p
, ss
, delim
, &ipend
);
1879 dout("missing matching ']'\n");
1886 if (p
< end
&& *p
== ':') {
1889 while (p
< end
&& *p
>= '0' && *p
<= '9') {
1890 port
= (port
* 10) + (*p
- '0');
1893 if (port
> 65535 || port
== 0)
1896 port
= CEPH_MON_PORT
;
1899 addr_set_port(ss
, port
);
1901 dout("parse_ips got %s\n", ceph_pr_addr(ss
));
1918 pr_err("parse_ips bad ip '%.*s'\n", (int)(end
- c
), c
);
1921 EXPORT_SYMBOL(ceph_parse_ips
);
1923 static int process_banner(struct ceph_connection
*con
)
1925 dout("process_banner on %p\n", con
);
1927 if (verify_hello(con
) < 0)
1930 ceph_decode_addr(&con
->actual_peer_addr
);
1931 ceph_decode_addr(&con
->peer_addr_for_me
);
	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
1938 if (memcmp(&con
->peer_addr
, &con
->actual_peer_addr
,
1939 sizeof(con
->peer_addr
)) != 0 &&
1940 !(addr_is_blank(&con
->actual_peer_addr
.in_addr
) &&
1941 con
->actual_peer_addr
.nonce
== con
->peer_addr
.nonce
)) {
1942 pr_warning("wrong peer, want %s/%d, got %s/%d\n",
1943 ceph_pr_addr(&con
->peer_addr
.in_addr
),
1944 (int)le32_to_cpu(con
->peer_addr
.nonce
),
1945 ceph_pr_addr(&con
->actual_peer_addr
.in_addr
),
1946 (int)le32_to_cpu(con
->actual_peer_addr
.nonce
));
1947 con
->error_msg
= "wrong peer at address";
1952 * did we learn our address?
1954 if (addr_is_blank(&con
->msgr
->inst
.addr
.in_addr
)) {
1955 int port
= addr_port(&con
->msgr
->inst
.addr
.in_addr
);
1957 memcpy(&con
->msgr
->inst
.addr
.in_addr
,
1958 &con
->peer_addr_for_me
.in_addr
,
1959 sizeof(con
->peer_addr_for_me
.in_addr
));
1960 addr_set_port(&con
->msgr
->inst
.addr
.in_addr
, port
);
1961 encode_my_addr(con
->msgr
);
1962 dout("process_banner learned my addr is %s\n",
1963 ceph_pr_addr(&con
->msgr
->inst
.addr
.in_addr
));
1969 static int process_connect(struct ceph_connection
*con
)
1971 u64 sup_feat
= con
->msgr
->supported_features
;
1972 u64 req_feat
= con
->msgr
->required_features
;
1973 u64 server_feat
= le64_to_cpu(con
->in_reply
.features
);
1976 dout("process_connect on %p tag %d\n", con
, (int)con
->in_tag
);
1978 if (con
->auth_reply_buf
) {
1980 * Any connection that defines ->get_authorizer()
1981 * should also define ->verify_authorizer_reply().
1982 * See get_connect_authorizer().
1984 ret
= con
->ops
->verify_authorizer_reply(con
, 0);
1986 con
->error_msg
= "bad authorize reply";
1991 switch (con
->in_reply
.tag
) {
1992 case CEPH_MSGR_TAG_FEATURES
:
1993 pr_err("%s%lld %s feature set mismatch,"
1994 " my %llx < server's %llx, missing %llx\n",
1995 ENTITY_NAME(con
->peer_name
),
1996 ceph_pr_addr(&con
->peer_addr
.in_addr
),
1997 sup_feat
, server_feat
, server_feat
& ~sup_feat
);
1998 con
->error_msg
= "missing required protocol features";
1999 reset_connection(con
);
2002 case CEPH_MSGR_TAG_BADPROTOVER
:
2003 pr_err("%s%lld %s protocol version mismatch,"
2004 " my %d != server's %d\n",
2005 ENTITY_NAME(con
->peer_name
),
2006 ceph_pr_addr(&con
->peer_addr
.in_addr
),
2007 le32_to_cpu(con
->out_connect
.protocol_version
),
2008 le32_to_cpu(con
->in_reply
.protocol_version
));
2009 con
->error_msg
= "protocol version mismatch";
2010 reset_connection(con
);
2013 case CEPH_MSGR_TAG_BADAUTHORIZER
:
2015 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con
,
2017 if (con
->auth_retry
== 2) {
2018 con
->error_msg
= "connect authorization failure";
2021 con_out_kvec_reset(con
);
2022 ret
= prepare_write_connect(con
);
2025 prepare_read_connect(con
);
2028 case CEPH_MSGR_TAG_RESETSESSION
:
2030 * If we connected with a large connect_seq but the peer
2031 * has no record of a session with us (no connection, or
2032 * connect_seq == 0), they will send RESETSESION to indicate
2033 * that they must have reset their session, and may have
2036 dout("process_connect got RESET peer seq %u\n",
2037 le32_to_cpu(con
->in_reply
.connect_seq
));
2038 pr_err("%s%lld %s connection reset\n",
2039 ENTITY_NAME(con
->peer_name
),
2040 ceph_pr_addr(&con
->peer_addr
.in_addr
));
2041 reset_connection(con
);
2042 con_out_kvec_reset(con
);
2043 ret
= prepare_write_connect(con
);
2046 prepare_read_connect(con
);
2048 /* Tell ceph about it. */
2049 mutex_unlock(&con
->mutex
);
2050 pr_info("reset on %s%lld\n", ENTITY_NAME(con
->peer_name
));
2051 if (con
->ops
->peer_reset
)
2052 con
->ops
->peer_reset(con
);
2053 mutex_lock(&con
->mutex
);
2054 if (con
->state
!= CON_STATE_NEGOTIATING
)
2058 case CEPH_MSGR_TAG_RETRY_SESSION
:
2060 * If we sent a smaller connect_seq than the peer has, try
2061 * again with a larger value.
2063 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
2064 le32_to_cpu(con
->out_connect
.connect_seq
),
2065 le32_to_cpu(con
->in_reply
.connect_seq
));
2066 con
->connect_seq
= le32_to_cpu(con
->in_reply
.connect_seq
);
2067 con_out_kvec_reset(con
);
2068 ret
= prepare_write_connect(con
);
2071 prepare_read_connect(con
);
2074 case CEPH_MSGR_TAG_RETRY_GLOBAL
:
2076 * If we sent a smaller global_seq than the peer has, try
2077 * again with a larger value.
2079 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
2080 con
->peer_global_seq
,
2081 le32_to_cpu(con
->in_reply
.global_seq
));
2082 get_global_seq(con
->msgr
,
2083 le32_to_cpu(con
->in_reply
.global_seq
));
2084 con_out_kvec_reset(con
);
2085 ret
= prepare_write_connect(con
);
2088 prepare_read_connect(con
);
2091 case CEPH_MSGR_TAG_SEQ
:
2092 case CEPH_MSGR_TAG_READY
:
2093 if (req_feat
& ~server_feat
) {
2094 pr_err("%s%lld %s protocol feature mismatch,"
2095 " my required %llx > server's %llx, need %llx\n",
2096 ENTITY_NAME(con
->peer_name
),
2097 ceph_pr_addr(&con
->peer_addr
.in_addr
),
2098 req_feat
, server_feat
, req_feat
& ~server_feat
);
2099 con
->error_msg
= "missing required protocol features";
2100 reset_connection(con
);
2104 WARN_ON(con
->state
!= CON_STATE_NEGOTIATING
);
2105 con
->state
= CON_STATE_OPEN
;
2106 con
->auth_retry
= 0; /* we authenticated; clear flag */
2107 con
->peer_global_seq
= le32_to_cpu(con
->in_reply
.global_seq
);
2109 con
->peer_features
= server_feat
;
2110 dout("process_connect got READY gseq %d cseq %d (%d)\n",
2111 con
->peer_global_seq
,
2112 le32_to_cpu(con
->in_reply
.connect_seq
),
2114 WARN_ON(con
->connect_seq
!=
2115 le32_to_cpu(con
->in_reply
.connect_seq
));
2117 if (con
->in_reply
.flags
& CEPH_MSG_CONNECT_LOSSY
)
2118 con_flag_set(con
, CON_FLAG_LOSSYTX
);
2120 con
->delay
= 0; /* reset backoff memory */
2122 if (con
->in_reply
.tag
== CEPH_MSGR_TAG_SEQ
) {
2123 prepare_write_seq(con
);
2124 prepare_read_seq(con
);
2126 prepare_read_tag(con
);
2130 case CEPH_MSGR_TAG_WAIT
:
2132 * If there is a connection race (we are opening
2133 * connections to each other), one of us may just have
2134 * to WAIT. This shouldn't happen if we are the
2137 pr_err("process_connect got WAIT as client\n");
2138 con
->error_msg
= "protocol error, got WAIT as client";
2142 pr_err("connect protocol error, will retry\n");
2143 con
->error_msg
= "protocol error, garbage tag during connect";
2151 * read (part of) an ack
2153 static int read_partial_ack(struct ceph_connection
*con
)
2155 int size
= sizeof (con
->in_temp_ack
);
2158 return read_partial(con
, end
, size
, &con
->in_temp_ack
);
2162 * We can finally discard anything that's been acked.
2164 static void process_ack(struct ceph_connection
*con
)
2167 u64 ack
= le64_to_cpu(con
->in_temp_ack
);
2170 while (!list_empty(&con
->out_sent
)) {
2171 m
= list_first_entry(&con
->out_sent
, struct ceph_msg
,
2173 seq
= le64_to_cpu(m
->hdr
.seq
);
2176 dout("got ack for seq %llu type %d at %p\n", seq
,
2177 le16_to_cpu(m
->hdr
.type
), m
);
2178 m
->ack_stamp
= jiffies
;
2181 prepare_read_tag(con
);
2185 static int read_partial_message_section(struct ceph_connection
*con
,
2186 struct kvec
*section
,
2187 unsigned int sec_len
, u32
*crc
)
2193 while (section
->iov_len
< sec_len
) {
2194 BUG_ON(section
->iov_base
== NULL
);
2195 left
= sec_len
- section
->iov_len
;
2196 ret
= ceph_tcp_recvmsg(con
->sock
, (char *)section
->iov_base
+
2197 section
->iov_len
, left
);
2200 section
->iov_len
+= ret
;
2202 if (section
->iov_len
== sec_len
)
2203 *crc
= crc32c(0, section
->iov_base
, section
->iov_len
);
2208 static int read_partial_msg_data(struct ceph_connection
*con
)
2210 struct ceph_msg
*msg
= con
->in_msg
;
2211 struct ceph_msg_data_cursor
*cursor
= &msg
->cursor
;
2212 const bool do_datacrc
= !con
->msgr
->nocrc
;
2220 if (list_empty(&msg
->data
))
2224 crc
= con
->in_data_crc
;
2225 while (cursor
->resid
) {
2226 page
= ceph_msg_data_next(&msg
->cursor
, &page_offset
, &length
,
2228 ret
= ceph_tcp_recvpage(con
->sock
, page
, page_offset
, length
);
2231 con
->in_data_crc
= crc
;
2237 crc
= ceph_crc32c_page(crc
, page
, page_offset
, ret
);
2238 (void) ceph_msg_data_advance(&msg
->cursor
, (size_t)ret
);
2241 con
->in_data_crc
= crc
;
2243 return 1; /* must return > 0 to indicate success */
/*
 * read (part of) a message.
 */
2249 static int ceph_con_in_msg_alloc(struct ceph_connection
*con
, int *skip
);
static int read_partial_message(struct ceph_connection *con)
{
	struct ceph_msg *m = con->in_msg;
	int size;
	int end;
	int ret;
	unsigned int front_len, middle_len, data_len;
	bool do_datacrc = !con->msgr->nocrc;
	u64 seq;
	u32 crc;

	dout("read_partial_message con %p msg %p\n", con, m);

	/* header */
	size = sizeof (con->in_hdr);
	end = size;
	ret = read_partial(con, end, size, &con->in_hdr);
	if (ret <= 0)
		return ret;

	crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
	if (cpu_to_le32(crc) != con->in_hdr.crc) {
		pr_err("read_partial_message bad hdr "
		       " crc %u != expected %u\n",
		       crc, con->in_hdr.crc);
		return -EBADMSG;
	}

	front_len = le32_to_cpu(con->in_hdr.front_len);
	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
		return -EIO;
	middle_len = le32_to_cpu(con->in_hdr.middle_len);
	if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)
		return -EIO;
	data_len = le32_to_cpu(con->in_hdr.data_len);
	if (data_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;

	/* verify seq# */
	seq = le64_to_cpu(con->in_hdr.seq);
	if ((s64)seq - (s64)con->in_seq < 1) {
		pr_info("skipping %s%lld %s seq %lld expected %lld\n",
			ENTITY_NAME(con->peer_name),
			ceph_pr_addr(&con->peer_addr.in_addr),
			seq, con->in_seq + 1);
		con->in_base_pos = -front_len - middle_len - data_len -
			sizeof(m->footer);
		con->in_tag = CEPH_MSGR_TAG_READY;
		return 0;
	} else if ((s64)seq - (s64)con->in_seq > 1) {
		pr_err("read_partial_message bad seq %lld expected %lld\n",
		       seq, con->in_seq + 1);
		con->error_msg = "bad message sequence # for incoming message";
		return -EBADMSG;
	}

	/* allocate message? */
	if (!con->in_msg) {
		int skip = 0;

		dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
		     front_len, data_len);
		ret = ceph_con_in_msg_alloc(con, &skip);
		if (ret < 0)
			return ret;

		BUG_ON(!con->in_msg ^ skip);
		if (con->in_msg && data_len > con->in_msg->data_length) {
			pr_warning("%s skipping long message (%u > %zd)\n",
				__func__, data_len, con->in_msg->data_length);
			ceph_msg_put(con->in_msg);
			con->in_msg = NULL;
			skip = 1;
		}
		if (skip) {
			/* skip this message */
			dout("alloc_msg said skip message\n");
			con->in_base_pos = -front_len - middle_len - data_len -
				sizeof(m->footer);
			con->in_tag = CEPH_MSGR_TAG_READY;
			con->in_seq++;
			return 0;
		}

		BUG_ON(!con->in_msg);
		BUG_ON(con->in_msg->con != con);
		m = con->in_msg;
		m->front.iov_len = 0;    /* haven't read it yet */
		if (m->middle)
			m->middle->vec.iov_len = 0;

		/* prepare for data payload, if any */

		if (data_len)
			prepare_message_data(con->in_msg, data_len);
	}

	/* front */
	ret = read_partial_message_section(con, &m->front, front_len,
					   &con->in_front_crc);
	if (ret <= 0)
		return ret;

	/* middle */
	if (m->middle) {
		ret = read_partial_message_section(con, &m->middle->vec,
						   middle_len,
						   &con->in_middle_crc);
		if (ret <= 0)
			return ret;
	}

	/* (page) data */
	if (data_len) {
		ret = read_partial_msg_data(con);
		if (ret <= 0)
			return ret;
	}

	/* footer */
	size = sizeof (m->footer);
	end += size;
	ret = read_partial(con, end, size, &m->footer);
	if (ret <= 0)
		return ret;

	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
	     m, front_len, m->footer.front_crc, middle_len,
	     m->footer.middle_crc, data_len, m->footer.data_crc);

	/* crc ok? */
	if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
		pr_err("read_partial_message %p front crc %u != exp. %u\n",
		       m, con->in_front_crc, m->footer.front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
		pr_err("read_partial_message %p middle crc %u != exp %u\n",
		       m, con->in_middle_crc, m->footer.middle_crc);
		return -EBADMSG;
	}
	if (do_datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
	    con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
		pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
		       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
		return -EBADMSG;
	}

	return 1; /* done! */
}
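/*
 * Editor's note (illustrative): header and footer CRCs travel little-endian
 * on the wire, so one side of each comparison must be byte-swapped.  The two
 * forms below are equivalent tests, with hdr standing for a
 * struct ceph_msg_header pointer such as &con->in_hdr:
 *
 *	crc = crc32c(0, hdr, offsetof(struct ceph_msg_header, crc));
 *	bad = cpu_to_le32(crc) != hdr->crc;
 *	bad = crc != le32_to_cpu(hdr->crc);
 */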
/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	BUG_ON(con->in_msg->con != con);
	con->in_msg->con = NULL;
	msg = con->in_msg;
	con->in_msg = NULL;
	con->ops->put(con);

	/* if first message, set peer_name */
	if (con->peer_name.type == 0)
		con->peer_name = msg->hdr.src;

	con->in_seq++;
	mutex_unlock(&con->mutex);

	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
	     msg, le64_to_cpu(msg->hdr.seq),
	     ENTITY_NAME(msg->hdr.src),
	     le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.data_len),
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
	con->ops->dispatch(con, msg);

	mutex_lock(&con->mutex);
}
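/*
 * Editor's sketch (hypothetical op, not part of this file): dispatch() is
 * invoked with con->mutex dropped, so it may sleep, but it must not wait for
 * further messages on the same connection.  A minimal handler might look
 * like this, dropping its message reference when done:
 *
 *	static void demo_dispatch(struct ceph_connection *con,
 *				  struct ceph_msg *msg)
 *	{
 *		dout("got message type %d\n", le16_to_cpu(msg->hdr.type));
 *		ceph_msg_put(msg);
 *	}
 */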
/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
	int ret = 1;

	dout("try_write start %p state %lu\n", con, con->state);

more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

	/* open the socket first? */
	if (con->state == CON_STATE_PREOPEN) {
		BUG_ON(con->sock);
		con->state = CON_STATE_CONNECTING;

		con_out_kvec_reset(con);
		prepare_write_banner(con);
		prepare_read_banner(con);

		BUG_ON(con->in_msg);
		con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
		ret = ceph_tcp_connect(con);
		if (ret < 0) {
			con->error_msg = "connect error";
			goto out;
		}
	}

more_kvec:
	/* kvec data queued? */
	if (con->out_skip) {
		ret = write_partial_skip(con);
		if (ret <= 0)
			goto out;
	}
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
		if (ret <= 0)
			goto out;
	}

	/* msg pages? */
	if (con->out_msg) {
		if (con->out_msg_done) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;   /* we're done with this one */
			goto do_next;
		}

		ret = write_partial_message_data(con);
		if (ret == 1)
			goto more_kvec;  /* we need to send the footer, too! */
		if (ret == 0)
			goto out;
		if (ret < 0) {
			dout("try_write write_partial_message_data err %d\n",
			     ret);
			goto out;
		}
	}

do_next:
	if (con->state == CON_STATE_OPEN) {
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
		if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
			prepare_write_keepalive(con);
			goto more;
		}
	}

	/* Nothing to do! */
	con_flag_clear(con, CON_FLAG_WRITE_PENDING);
	dout("try_write nothing else to write.\n");
	ret = 0;
out:
	dout("try_write done on %p ret %d\n", con, ret);
	return ret;
}
/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	int ret = -1;

more:
	dout("try_read start on %p state %lu\n", con, con->state);
	if (con->state != CON_STATE_CONNECTING &&
	    con->state != CON_STATE_NEGOTIATING &&
	    con->state != CON_STATE_OPEN)
		return 0;

	BUG_ON(!con->sock);

	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);

	if (con->state == CON_STATE_CONNECTING) {
		dout("try_read connecting\n");
		ret = read_partial_banner(con);
		if (ret <= 0)
			goto out;
		ret = process_banner(con);
		if (ret < 0)
			goto out;

		con->state = CON_STATE_NEGOTIATING;

		/*
		 * Received banner is good, exchange connection info.
		 * Do not reset out_kvec, as sending our banner raced
		 * with receiving peer banner after connect completed.
		 */
		ret = prepare_write_connect(con);
		if (ret < 0)
			goto out;
		prepare_read_connect(con);

		/* Send connection info before awaiting response */
		goto out;
	}

	if (con->state == CON_STATE_NEGOTIATING) {
		dout("try_read negotiating\n");
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto out;
		ret = process_connect(con);
		if (ret < 0)
			goto out;
		goto more;
	}

	WARN_ON(con->state != CON_STATE_OPEN);

	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
		static char buf[SKIP_BUF_SIZE];
		int skip = min((int) sizeof (buf), -con->in_base_pos);

		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
		if (ret <= 0)
			goto out;
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto out;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			con_close_socket(con);
			con->state = CON_STATE_CLOSED;
			goto out;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc";
				ret = -EIO;
				break;
			case -EIO:
				con->error_msg = "io error";
				break;
			}
			goto out;
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		if (con->state == CON_STATE_OPEN)
			prepare_read_tag(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK ||
	    con->in_tag == CEPH_MSGR_TAG_SEQ) {
		/*
		 * the final handshake seq exchange is semantically
		 * equivalent to an ACK
		 */
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto out;
		process_ack(con);
		goto more;
	}

out:
	dout("try_read done on %p ret %d\n", con, ret);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}
/*
 * Atomically queue work on a connection after the specified delay.
 * Bump @con reference to avoid races with connection teardown.
 * Returns 0 if work was queued, or an error code otherwise.
 */
static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
{
	if (!con->ops->get(con)) {
		dout("%s %p ref count 0\n", __func__, con);
		return -ENOENT;
	}

	if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
		dout("%s %p - already queued\n", __func__, con);
		con->ops->put(con);
		return -EBUSY;
	}

	dout("%s %p %lu\n", __func__, con, delay);
	return 0;
}

static void queue_con(struct ceph_connection *con)
{
	(void) queue_con_delay(con, 0);
}
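/*
 * Editor's note (illustrative): queue_delayed_work() returns false when the
 * work item is already pending, which is why the connection reference taken
 * just above must be dropped on that path.  The general pattern:
 *
 *	get_ref();
 *	if (!queue_delayed_work(wq, &dwork, delay))
 *		put_ref();	-- already queued, undo the extra reference
 *
 * wq, dwork, get_ref and put_ref are placeholders for the real objects used
 * above.
 */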
static bool con_sock_closed(struct ceph_connection *con)
{
	if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
		return false;

#define CASE(x)								\
	case CON_STATE_ ## x:						\
		con->error_msg = "socket closed (con state " #x ")";	\
		break;

	switch (con->state) {
	CASE(CLOSED);
	CASE(PREOPEN);
	CASE(CONNECTING);
	CASE(NEGOTIATING);
	CASE(OPEN);
	CASE(STANDBY);
	default:
		pr_warning("%s con %p unrecognized state %lu\n",
			__func__, con, con->state);
		con->error_msg = "unrecognized con state";
		BUG();
		break;
	}
#undef CASE

	return true;
}
static bool con_backoff(struct ceph_connection *con)
{
	int ret;

	if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
		return false;

	ret = queue_con_delay(con, round_jiffies_relative(con->delay));
	if (ret) {
		dout("%s: con %p FAILED to back off %lu\n", __func__,
			con, con->delay);
		BUG_ON(ret == -ENOENT);
		con_flag_set(con, CON_FLAG_BACKOFF);
	}

	return true;
}
/* Finish fault handling; con->mutex must *not* be held here */

static void con_fault_finish(struct ceph_connection *con)
{
	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
	 */
	if (con->auth_retry && con->ops->invalidate_authorizer) {
		dout("calling invalidate_authorizer()\n");
		con->ops->invalidate_authorizer(con);
	}

	if (con->ops->fault)
		con->ops->fault(con);
}
/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	bool fault;

	mutex_lock(&con->mutex);
	while (true) {
		int ret;

		if ((fault = con_sock_closed(con))) {
			dout("%s: con %p SOCK_CLOSED\n", __func__, con);
			break;
		}
		if (con_backoff(con)) {
			dout("%s: con %p BACKOFF\n", __func__, con);
			break;
		}
		if (con->state == CON_STATE_STANDBY) {
			dout("%s: con %p STANDBY\n", __func__, con);
			break;
		}
		if (con->state == CON_STATE_CLOSED) {
			dout("%s: con %p CLOSED\n", __func__, con);
			BUG_ON(con->sock);
			break;
		}
		if (con->state == CON_STATE_PREOPEN) {
			dout("%s: con %p PREOPEN\n", __func__, con);
			BUG_ON(con->sock);
		}

		ret = try_read(con);
		if (ret < 0) {
			if (ret == -EAGAIN)
				continue;
			con->error_msg = "socket error on read";
			fault = true;
			break;
		}

		ret = try_write(con);
		if (ret < 0) {
			if (ret == -EAGAIN)
				continue;
			con->error_msg = "socket error on write";
			fault = true;
		}

		break;	/* If we make it to here, we're done */
	}
	if (fault)
		con_fault(con);
	mutex_unlock(&con->mutex);

	if (fault)
		con_fault_finish(con);

	con->ops->put(con);
}
/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff
 */
static void con_fault(struct ceph_connection *con)
{
	pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
	       ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));

	WARN_ON(con->state != CON_STATE_CONNECTING &&
	       con->state != CON_STATE_NEGOTIATING &&
	       con->state != CON_STATE_OPEN);

	con_close_socket(con);

	if (con_flag_test(con, CON_FLAG_LOSSYTX)) {
		dout("fault on LOSSYTX channel, marking CLOSED\n");
		con->state = CON_STATE_CLOSED;
		return;
	}

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages queued or keepalive pending, place
	 * the connection in a STANDBY state */
	if (list_empty(&con->out_queue) &&
	    !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) {
		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
		con_flag_clear(con, CON_FLAG_WRITE_PENDING);
		con->state = CON_STATE_STANDBY;
	} else {
		/* retry after a delay. */
		con->state = CON_STATE_PREOPEN;
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		con_flag_set(con, CON_FLAG_BACKOFF);
		queue_con(con);
	}
}
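/*
 * Editor's note (values illustrative): on each consecutive fault the retry
 * delay doubles until it reaches the cap defined earlier in this file, so
 * the backoff sequence is delay, 2*delay, 4*delay, and so on.  In
 * pseudo-form:
 *
 *	delay = delay ? min(2 * delay, MAX_DELAY_INTERVAL)
 *		      : BASE_DELAY_INTERVAL;
 *
 * which matches the if/else chain above except that the code only doubles
 * while the delay is still below the cap (it never clamps an existing value
 * back down).
 */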
/*
 * initialize a new messenger instance
 */
void ceph_messenger_init(struct ceph_messenger *msgr,
			struct ceph_entity_addr *myaddr,
			u32 supported_features,
			u32 required_features,
			bool nocrc)
{
	msgr->supported_features = supported_features;
	msgr->required_features = required_features;

	spin_lock_init(&msgr->global_seq_lock);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	msgr->inst.addr.type = 0;
	get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);
	msgr->nocrc = nocrc;

	atomic_set(&msgr->stopping, 0);

	dout("%s %p\n", __func__, msgr);
}
EXPORT_SYMBOL(ceph_messenger_init);
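/*
 * Editor's sketch (hypothetical caller): a client typically embeds one
 * ceph_messenger and initializes it once at startup.  Assuming the default
 * feature masks from ceph_features.h:
 *
 *	struct ceph_messenger msgr;
 *
 *	ceph_messenger_init(&msgr, NULL,
 *			    CEPH_FEATURES_SUPPORTED_DEFAULT,
 *			    CEPH_FEATURES_REQUIRED_DEFAULT,
 *			    false);
 *
 * A NULL myaddr leaves the local address unset; passing false for the last
 * argument keeps CRC checking enabled.
 */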
static void clear_standby(struct ceph_connection *con)
{
	/* come back from STANDBY? */
	if (con->state == CON_STATE_STANDBY) {
		dout("clear_standby %p and ++connect_seq\n", con);
		con->state = CON_STATE_PREOPEN;
		con->connect_seq++;
		WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING));
		WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING));
	}
}
/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	/* set src+dst */
	msg->hdr.src = con->msgr->inst.name;
	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
	msg->needs_out_seq = true;

	mutex_lock(&con->mutex);

	if (con->state == CON_STATE_CLOSED) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		mutex_unlock(&con->mutex);
		return;
	}

	BUG_ON(msg->con != NULL);
	msg->con = con->ops->get(con);
	BUG_ON(msg->con == NULL);

	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));

	clear_standby(con);
	mutex_unlock(&con->mutex);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);
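/*
 * Editor's sketch (hypothetical caller): the usual send path is to allocate
 * a message, fill its front payload, and hand the reference to the
 * messenger.  Assuming an already-open connection "con":
 *
 *	struct ceph_msg *m;
 *
 *	m = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS, false);
 *	if (m)
 *		ceph_con_send(con, m);
 *
 * ceph_con_send() takes over the caller's reference; the messenger drops it
 * once the message is no longer needed (or immediately if the connection is
 * already closed).
 */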
/*
 * Revoke a message that was previously queued for send
 */
void ceph_msg_revoke(struct ceph_msg *msg)
{
	struct ceph_connection *con = msg->con;

	if (!con)
		return;		/* Message not in our possession */

	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("%s %p msg %p - was on queue\n", __func__, con, msg);
		list_del_init(&msg->list_head);
		BUG_ON(msg->con == NULL);
		msg->con->ops->put(msg->con);
		msg->con = NULL;
		msg->hdr.seq = 0;

		ceph_msg_put(msg);
	}
	if (con->out_msg == msg) {
		dout("%s %p msg %p - was sending\n", __func__, con, msg);
		con->out_msg = NULL;
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
		msg->hdr.seq = 0;

		ceph_msg_put(msg);
	}
	mutex_unlock(&con->mutex);
}
/*
 * Revoke a message that we may be reading data into
 */
void ceph_msg_revoke_incoming(struct ceph_msg *msg)
{
	struct ceph_connection *con;

	BUG_ON(msg == NULL);
	if (!msg->con) {
		dout("%s msg %p null con\n", __func__, msg);
		return;		/* Message not in our possession */
	}

	con = msg->con;
	mutex_lock(&con->mutex);
	if (con->in_msg == msg) {
		unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("%s %p msg %p revoked\n", __func__, con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
	} else {
		dout("%s %p in_msg %p msg %p no-op\n",
		     __func__, con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}
/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	dout("con_keepalive %p\n", con);
	mutex_lock(&con->mutex);
	clear_standby(con);
	mutex_unlock(&con->mutex);
	if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
	    con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
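/*
 * Editor's sketch (hypothetical caller): callers normally invoke this from a
 * periodic delayed work item so an otherwise idle session is not torn down,
 * e.g.:
 *
 *	static void demo_keepalive_tick(struct work_struct *work)
 *	{
 *		ceph_con_keepalive(&demo_con);
 *		schedule_delayed_work(&demo_tick_work, 10 * HZ);
 *	}
 *
 * demo_con and demo_tick_work are placeholders for the caller's own objects.
 */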
static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
{
	struct ceph_msg_data *data;

	if (WARN_ON(!ceph_msg_data_type_valid(type)))
		return NULL;

	data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS);
	if (!data)
		return NULL;

	data->type = type;
	INIT_LIST_HEAD(&data->links);

	return data;
}

static void ceph_msg_data_destroy(struct ceph_msg_data *data)
{
	if (!data)
		return;

	WARN_ON(!list_empty(&data->links));
	if (data->type == CEPH_MSG_DATA_PAGELIST) {
		ceph_pagelist_release(data->pagelist);
		kfree(data->pagelist);
	}
	kmem_cache_free(ceph_msg_data_cache, data);
}
void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
		size_t length, size_t alignment)
{
	struct ceph_msg_data *data;

	BUG_ON(!pages);
	BUG_ON(!length);

	data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES);
	BUG_ON(!data);
	data->pages = pages;
	data->length = length;
	data->alignment = alignment & ~PAGE_MASK;

	list_add_tail(&data->links, &msg->data);
	msg->data_length += length;
}
EXPORT_SYMBOL(ceph_msg_data_add_pages);

void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
				struct ceph_pagelist *pagelist)
{
	struct ceph_msg_data *data;

	BUG_ON(!pagelist);
	BUG_ON(!pagelist->length);

	data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST);
	BUG_ON(!data);
	data->pagelist = pagelist;

	list_add_tail(&data->links, &msg->data);
	msg->data_length += pagelist->length;
}
EXPORT_SYMBOL(ceph_msg_data_add_pagelist);

#ifdef CONFIG_BLOCK
void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
		size_t length)
{
	struct ceph_msg_data *data;

	BUG_ON(!bio);

	data = ceph_msg_data_create(CEPH_MSG_DATA_BIO);
	BUG_ON(!data);
	data->bio = bio;
	data->bio_length = length;

	list_add_tail(&data->links, &msg->data);
	msg->data_length += length;
}
EXPORT_SYMBOL(ceph_msg_data_add_bio);
#endif	/* CONFIG_BLOCK */
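/*
 * Editor's sketch (hypothetical caller): attaching a page vector as the data
 * payload of an outgoing message.  The pages array and the pages themselves
 * must remain valid until the message is released:
 *
 *	struct page *pages[1] = { demo_page };
 *
 *	ceph_msg_data_add_pages(msg, pages, PAGE_SIZE, 0);
 *	ceph_con_send(con, msg);
 *
 * demo_page, msg and con are placeholders; length is in bytes and alignment
 * is the offset of the data within the first page.
 */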
/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
			      bool can_fail)
{
	struct ceph_msg *m;

	m = kmem_cache_zalloc(ceph_msg_cache, flags);
	if (m == NULL)
		goto out;

	m->hdr.type = cpu_to_le16(type);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->hdr.front_len = cpu_to_le32(front_len);

	INIT_LIST_HEAD(&m->list_head);
	kref_init(&m->kref);
	INIT_LIST_HEAD(&m->data);

	/* front */
	m->front_alloc_len = front_len;
	if (front_len) {
		if (front_len > PAGE_CACHE_SIZE) {
			m->front.iov_base = __vmalloc(front_len, flags,
						      PAGE_KERNEL);
			m->front_is_vmalloc = true;
		} else {
			m->front.iov_base = kmalloc(front_len, flags);
		}
		if (m->front.iov_base == NULL) {
			dout("ceph_msg_new can't allocate %d bytes\n",
			     front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front.iov_len = front_len;

	dout("ceph_msg_new %p front %d\n", m, front_len);
	return m;

out2:
	ceph_msg_put(m);
out:
	if (!can_fail) {
		pr_err("msg_new can't create type %d front %d\n", type,
		       front_len);
		WARN_ON(1);
	} else {
		dout("msg_new can't create type %d front %d\n", type,
		     front_len);
	}
	return NULL;
}
EXPORT_SYMBOL(ceph_msg_new);
/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}
/*
 * Allocate a message for receiving an incoming message on a
 * connection, and save the result in con->in_msg.  Uses the
 * connection's private alloc_msg op if available.
 *
 * Returns 0 on success, or a negative error code.
 *
 * On success, if we set *skip = 1:
 *  - the next message should be skipped and ignored.
 *  - con->in_msg == NULL
 * or if we set *skip = 0:
 *  - con->in_msg is non-null.
 * On error (ENOMEM, EAGAIN, ...),
 *  - con->in_msg == NULL
 */
static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
{
	struct ceph_msg_header *hdr = &con->in_hdr;
	int middle_len = le32_to_cpu(hdr->middle_len);
	struct ceph_msg *msg;
	int ret = 0;

	BUG_ON(con->in_msg != NULL);
	BUG_ON(!con->ops->alloc_msg);

	mutex_unlock(&con->mutex);
	msg = con->ops->alloc_msg(con, hdr, skip);
	mutex_lock(&con->mutex);
	if (con->state != CON_STATE_OPEN) {
		if (msg)
			ceph_msg_put(msg);
		return -EAGAIN;
	}
	if (msg) {
		BUG_ON(*skip);
		con->in_msg = msg;
		con->in_msg->con = con->ops->get(con);
		BUG_ON(con->in_msg->con == NULL);
	} else {
		/*
		 * Null message pointer means either we should skip
		 * this message or we couldn't allocate memory.  The
		 * former is not an error.
		 */
		if (*skip)
			return 0;
		con->error_msg = "error allocating memory for incoming message";

		return -ENOMEM;
	}
	memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len && !con->in_msg->middle) {
		ret = ceph_alloc_middle(con, con->in_msg);
		if (ret < 0) {
			ceph_msg_put(con->in_msg);
			con->in_msg = NULL;
		}
	}

	return ret;
}
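/*
 * Editor's sketch (hypothetical op, not from this file): a minimal alloc_msg
 * implementation that accepts every message and never asks to skip:
 *
 *	static struct ceph_msg *demo_alloc_msg(struct ceph_connection *con,
 *					       struct ceph_msg_header *hdr,
 *					       int *skip)
 *	{
 *		int front_len = le32_to_cpu(hdr->front_len);
 *
 *		*skip = 0;
 *		return ceph_msg_new(le16_to_cpu(hdr->type), front_len,
 *				    GFP_NOFS, false);
 *	}
 *
 * Returning NULL with *skip = 1 would instead tell the caller above to
 * discard the incoming message without treating it as an error.
 */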
/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
	dout("msg_kfree %p\n", m);
	if (m->front_is_vmalloc)
		vfree(m->front.iov_base);
	else
		kfree(m->front.iov_base);
	kmem_cache_free(ceph_msg_cache, m);
}
/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
	LIST_HEAD(data);
	struct list_head *links;
	struct list_head *next;

	dout("ceph_msg_put last one on %p\n", m);
	WARN_ON(!list_empty(&m->list_head));

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}

	list_splice_init(&m->data, &data);
	list_for_each_safe(links, next, &data) {
		struct ceph_msg_data *data;

		data = list_entry(links, struct ceph_msg_data, links);
		list_del_init(links);
		ceph_msg_data_destroy(data);
	}
	m->data_length = 0;

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_kfree(m);
}
EXPORT_SYMBOL(ceph_msg_last_put);
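/*
 * Editor's note (illustrative): this is the kref release callback; the
 * matching put helper declared in the messenger header boils down to
 *
 *	kref_put(&msg->kref, ceph_msg_last_put);
 *
 * so the cleanup above runs exactly once, when the last reference is
 * dropped.
 */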
void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
		 msg->front_alloc_len, msg->data_length);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);