libceph: fully initialize connection in con_init()
net/ceph/messenger.c
1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/crc32c.h>
4 #include <linux/ctype.h>
5 #include <linux/highmem.h>
6 #include <linux/inet.h>
7 #include <linux/kthread.h>
8 #include <linux/net.h>
9 #include <linux/slab.h>
10 #include <linux/socket.h>
11 #include <linux/string.h>
12 #include <linux/bio.h>
13 #include <linux/blkdev.h>
14 #include <linux/dns_resolver.h>
15 #include <net/tcp.h>
16
17 #include <linux/ceph/libceph.h>
18 #include <linux/ceph/messenger.h>
19 #include <linux/ceph/decode.h>
20 #include <linux/ceph/pagelist.h>
21 #include <linux/export.h>
22
23 /*
24 * Ceph uses the messenger to exchange ceph_msg messages with other
25 * hosts in the system. The messenger provides ordered and reliable
26 * delivery. We tolerate TCP disconnects by reconnecting (with
27 * exponential backoff) in the case of a fault (disconnection, bad
28 * crc, protocol error). Acks allow sent messages to be discarded by
29 * the sender.
30 */
31
32 /* State values for ceph_connection->sock_state; NEW is assumed to be 0 */
33
34 #define CON_SOCK_STATE_NEW 0 /* -> CLOSED */
35 #define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */
36 #define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */
37 #define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */
38 #define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */
39
40 /* static tag bytes (protocol control messages) */
41 static char tag_msg = CEPH_MSGR_TAG_MSG;
42 static char tag_ack = CEPH_MSGR_TAG_ACK;
43 static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
44
45 #ifdef CONFIG_LOCKDEP
46 static struct lock_class_key socket_class;
47 #endif
48
49 /*
50 * When skipping (ignoring) a block of input we read it into a "skip
51 * buffer," which is this many bytes in size.
52 */
53 #define SKIP_BUF_SIZE 1024
54
55 static void queue_con(struct ceph_connection *con);
56 static void con_work(struct work_struct *);
57 static void ceph_fault(struct ceph_connection *con);
58
59 /*
60 * Nicely render a sockaddr as a string. An array of formatted
61 * strings is used, to approximate reentrancy.
62 */
63 #define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */
64 #define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG)
65 #define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1)
66 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
67
68 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
69 static atomic_t addr_str_seq = ATOMIC_INIT(0);
70
71 static struct page *zero_page; /* used in certain error cases */
72
73 const char *ceph_pr_addr(const struct sockaddr_storage *ss)
74 {
75 int i;
76 char *s;
77 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
78 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
79
80 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
81 s = addr_str[i];
82
83 switch (ss->ss_family) {
84 case AF_INET:
85 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
86 ntohs(in4->sin_port));
87 break;
88
89 case AF_INET6:
90 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
91 ntohs(in6->sin6_port));
92 break;
93
94 default:
95 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
96 ss->ss_family);
97 }
98
99 return s;
100 }
101 EXPORT_SYMBOL(ceph_pr_addr);
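/*
 * Illustrative use (a sketch, not from any specific caller): the
 * returned pointer refers to one of ADDR_STR_COUNT static slots, so
 * consume it promptly -- the slot is reused after ADDR_STR_COUNT
 * further calls:
 *
 *	dout("connecting to %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 */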
102
103 static void encode_my_addr(struct ceph_messenger *msgr)
104 {
105 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
106 ceph_encode_addr(&msgr->my_enc_addr);
107 }
108
109 /*
110 * work queue for all reading and writing to/from the socket.
111 */
112 static struct workqueue_struct *ceph_msgr_wq;
113
114 static void _ceph_msgr_exit(void)
115 {
116 if (ceph_msgr_wq) {
117 destroy_workqueue(ceph_msgr_wq);
118 ceph_msgr_wq = NULL;
119 }
120
121 BUG_ON(zero_page == NULL);
122 kunmap(zero_page);
123 page_cache_release(zero_page);
124 zero_page = NULL;
125 }
126
127 int ceph_msgr_init(void)
128 {
129 BUG_ON(zero_page != NULL);
130 zero_page = ZERO_PAGE(0);
131 page_cache_get(zero_page);
132
133 ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
134 if (ceph_msgr_wq)
135 return 0;
136
137 pr_err("msgr_init failed to create workqueue\n");
138 _ceph_msgr_exit();
139
140 return -ENOMEM;
141 }
142 EXPORT_SYMBOL(ceph_msgr_init);
143
144 void ceph_msgr_exit(void)
145 {
146 BUG_ON(ceph_msgr_wq == NULL);
147
148 _ceph_msgr_exit();
149 }
150 EXPORT_SYMBOL(ceph_msgr_exit);
151
152 void ceph_msgr_flush(void)
153 {
154 flush_workqueue(ceph_msgr_wq);
155 }
156 EXPORT_SYMBOL(ceph_msgr_flush);
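/*
 * Typical messenger lifecycle (a sketch; the real callers are the
 * libceph module init/exit and client teardown paths):
 *
 *	ret = ceph_msgr_init();		-- grab zero_page, create wq
 *	if (ret)
 *		return ret;
 *	...
 *	ceph_msgr_flush();		-- drain queued con_work items
 *	ceph_msgr_exit();		-- destroy wq, release zero_page
 */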
157
158 /* Connection socket state transition functions */
159
160 static void con_sock_state_init(struct ceph_connection *con)
161 {
162 int old_state;
163
164 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
165 if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
166 printk("%s: unexpected old state %d\n", __func__, old_state);
167 }
168
169 static void con_sock_state_connecting(struct ceph_connection *con)
170 {
171 int old_state;
172
173 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
174 if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
175 printk("%s: unexpected old state %d\n", __func__, old_state);
176 }
177
178 static void con_sock_state_connected(struct ceph_connection *con)
179 {
180 int old_state;
181
182 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
183 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
184 printk("%s: unexpected old state %d\n", __func__, old_state);
185 }
186
187 static void con_sock_state_closing(struct ceph_connection *con)
188 {
189 int old_state;
190
191 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
192 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
193 old_state != CON_SOCK_STATE_CONNECTED &&
194 old_state != CON_SOCK_STATE_CLOSING))
195 printk("%s: unexpected old state %d\n", __func__, old_state);
196 }
197
198 static void con_sock_state_closed(struct ceph_connection *con)
199 {
200 int old_state;
201
202 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
203 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
204 old_state != CON_SOCK_STATE_CLOSING))
205 printk("%s: unexpected old state %d\n", __func__, old_state);
206 }
207
208 /*
209 * socket callback functions
210 */
211
212 /* data available on socket, or listen socket received a connect */
213 static void ceph_sock_data_ready(struct sock *sk, int count_unused)
214 {
215 struct ceph_connection *con = sk->sk_user_data;
216
217 if (sk->sk_state != TCP_CLOSE_WAIT) {
218 dout("%s on %p state = %lu, queueing work\n", __func__,
219 con, con->state);
220 queue_con(con);
221 }
222 }
223
224 /* socket has buffer space for writing */
225 static void ceph_sock_write_space(struct sock *sk)
226 {
227 struct ceph_connection *con = sk->sk_user_data;
228
229 /* only queue to workqueue if there is data we want to write,
230 * and there is sufficient space in the socket buffer to accept
231 * more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
232 * doesn't get called again until try_write() fills the socket
233 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
234 * and net/core/stream.c:sk_stream_write_space().
235 */
236 if (test_bit(WRITE_PENDING, &con->flags)) {
237 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
238 dout("%s %p queueing write work\n", __func__, con);
239 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
240 queue_con(con);
241 }
242 } else {
243 dout("%s %p nothing to write\n", __func__, con);
244 }
245 }
246
247 /* socket's state has changed */
248 static void ceph_sock_state_change(struct sock *sk)
249 {
250 struct ceph_connection *con = sk->sk_user_data;
251
252 dout("%s %p state = %lu sk_state = %u\n", __func__,
253 con, con->state, sk->sk_state);
254
255 if (test_bit(CLOSED, &con->state))
256 return;
257
258 switch (sk->sk_state) {
259 case TCP_CLOSE:
260 dout("%s TCP_CLOSE\n", __func__);
261 case TCP_CLOSE_WAIT:
262 dout("%s TCP_CLOSE_WAIT\n", __func__);
263 con_sock_state_closing(con);
264 if (test_and_set_bit(SOCK_CLOSED, &con->flags) == 0) {
265 if (test_bit(CONNECTING, &con->state))
266 con->error_msg = "connection failed";
267 else
268 con->error_msg = "socket closed";
269 queue_con(con);
270 }
271 break;
272 case TCP_ESTABLISHED:
273 dout("%s TCP_ESTABLISHED\n", __func__);
274 con_sock_state_connected(con);
275 queue_con(con);
276 break;
277 default: /* Everything else is uninteresting */
278 break;
279 }
280 }
281
282 /*
283 * set up socket callbacks
284 */
285 static void set_sock_callbacks(struct socket *sock,
286 struct ceph_connection *con)
287 {
288 struct sock *sk = sock->sk;
289 sk->sk_user_data = con;
290 sk->sk_data_ready = ceph_sock_data_ready;
291 sk->sk_write_space = ceph_sock_write_space;
292 sk->sk_state_change = ceph_sock_state_change;
293 }
294
295
296 /*
297 * socket helpers
298 */
299
300 /*
301 * initiate connection to a remote socket.
302 */
303 static int ceph_tcp_connect(struct ceph_connection *con)
304 {
305 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
306 struct socket *sock;
307 int ret;
308
309 BUG_ON(con->sock);
310 ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
311 IPPROTO_TCP, &sock);
312 if (ret)
313 return ret;
314 sock->sk->sk_allocation = GFP_NOFS;
315
316 #ifdef CONFIG_LOCKDEP
317 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
318 #endif
319
320 set_sock_callbacks(sock, con);
321
322 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
323
324 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
325 O_NONBLOCK);
326 if (ret == -EINPROGRESS) {
327 dout("connect %s EINPROGRESS sk_state = %u\n",
328 ceph_pr_addr(&con->peer_addr.in_addr),
329 sock->sk->sk_state);
330 } else if (ret < 0) {
331 pr_err("connect %s error %d\n",
332 ceph_pr_addr(&con->peer_addr.in_addr), ret);
333 sock_release(sock);
334 con->error_msg = "connect error";
335
336 return ret;
337 }
338 con->sock = sock;
339 con_sock_state_connecting(con);
340
341 return 0;
342 }
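/*
 * Note: the connect() above is issued O_NONBLOCK, so -EINPROGRESS is
 * the expected result; completion is reported asynchronously via
 * ceph_sock_state_change() once sk_state reaches TCP_ESTABLISHED.
 */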
343
344 static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
345 {
346 struct kvec iov = {buf, len};
347 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
348 int r;
349
350 r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
351 if (r == -EAGAIN)
352 r = 0;
353 return r;
354 }
355
356 /*
357 * write something. @more is true if caller will be sending more data
358 * shortly.
359 */
360 static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
361 size_t kvlen, size_t len, int more)
362 {
363 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
364 int r;
365
366 if (more)
367 msg.msg_flags |= MSG_MORE;
368 else
369 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
370
371 r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
372 if (r == -EAGAIN)
373 r = 0;
374 return r;
375 }
376
377 static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
378 int offset, size_t size, int more)
379 {
380 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
381 int ret;
382
383 ret = kernel_sendpage(sock, page, offset, size, flags);
384 if (ret == -EAGAIN)
385 ret = 0;
386
387 return ret;
388 }
389
390
391 /*
392 * Shutdown/close the socket for the given connection.
393 */
394 static int con_close_socket(struct ceph_connection *con)
395 {
396 int rc;
397
398 dout("con_close_socket on %p sock %p\n", con, con->sock);
399 if (!con->sock)
400 return 0;
401 set_bit(SOCK_CLOSED, &con->flags);
402 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
403 sock_release(con->sock);
404 con->sock = NULL;
405 clear_bit(SOCK_CLOSED, &con->flags);
406 con_sock_state_closed(con);
407 return rc;
408 }
409
410 /*
411 * Reset a connection. Discard all incoming and outgoing messages
412 * and clear *_seq state.
413 */
414 static void ceph_msg_remove(struct ceph_msg *msg)
415 {
416 list_del_init(&msg->list_head);
417 ceph_msg_put(msg);
418 }
419 static void ceph_msg_remove_list(struct list_head *head)
420 {
421 while (!list_empty(head)) {
422 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
423 list_head);
424 ceph_msg_remove(msg);
425 }
426 }
427
428 static void reset_connection(struct ceph_connection *con)
429 {
430 /* discard existing out_queue/out_sent messages and any in-flight */
431 /* in_msg/out_msg, and reset the connect/in/out seq counters */
432 ceph_msg_remove_list(&con->out_queue);
433 ceph_msg_remove_list(&con->out_sent);
434
435 if (con->in_msg) {
436 ceph_msg_put(con->in_msg);
437 con->in_msg = NULL;
438 }
439
440 con->connect_seq = 0;
441 con->out_seq = 0;
442 if (con->out_msg) {
443 ceph_msg_put(con->out_msg);
444 con->out_msg = NULL;
445 }
446 con->in_seq = 0;
447 con->in_seq_acked = 0;
448 }
449
450 /*
451 * mark a peer down. drop any open connections.
452 */
453 void ceph_con_close(struct ceph_connection *con)
454 {
455 dout("con_close %p peer %s\n", con,
456 ceph_pr_addr(&con->peer_addr.in_addr));
457 clear_bit(NEGOTIATING, &con->state);
458 clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */
459 set_bit(CLOSED, &con->state);
460
461 clear_bit(LOSSYTX, &con->flags); /* so we retry next connect */
462 clear_bit(KEEPALIVE_PENDING, &con->flags);
463 clear_bit(WRITE_PENDING, &con->flags);
464
465 mutex_lock(&con->mutex);
466 reset_connection(con);
467 con->peer_global_seq = 0;
468 cancel_delayed_work(&con->work);
469 mutex_unlock(&con->mutex);
470 queue_con(con);
471 }
472 EXPORT_SYMBOL(ceph_con_close);
473
474 /*
475 * Reopen a closed connection, with a new peer address.
476 */
477 void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
478 {
479 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
480 set_bit(OPENING, &con->state);
481 WARN_ON(!test_and_clear_bit(CLOSED, &con->state));
482
483 memcpy(&con->peer_addr, addr, sizeof(*addr));
484 con->delay = 0; /* reset backoff memory */
485 queue_con(con);
486 }
487 EXPORT_SYMBOL(ceph_con_open);
488
489 /*
490 * return true if this connection ever successfully opened
491 */
492 bool ceph_con_opened(struct ceph_connection *con)
493 {
494 return con->connect_seq > 0;
495 }
496
497 /*
498 * generic get/put
499 */
500 struct ceph_connection *ceph_con_get(struct ceph_connection *con)
501 {
502 int nref = __atomic_add_unless(&con->nref, 1, 0);
503
504 dout("con_get %p nref = %d -> %d\n", con, nref, nref + 1);
505
506 return nref ? con : NULL;
507 }
508
509 void ceph_con_put(struct ceph_connection *con)
510 {
511 int nref = atomic_dec_return(&con->nref);
512
513 BUG_ON(nref < 0);
514 if (nref == 0) {
515 BUG_ON(con->sock);
516 kfree(con);
517 }
518 dout("con_put %p nref = %d -> %d\n", con, nref + 1, nref);
519 }
520
521 /*
522 * initialize a new connection.
523 */
524 void ceph_con_init(struct ceph_connection *con, void *private,
525 const struct ceph_connection_operations *ops,
526 struct ceph_messenger *msgr, __u8 entity_type, __u64 entity_num)
527 {
528 dout("con_init %p\n", con);
529 memset(con, 0, sizeof(*con));
530 con->private = private;
531 con->ops = ops;
532 atomic_set(&con->nref, 1);
533 con->msgr = msgr;
534
535 con_sock_state_init(con);
536
537 con->peer_name.type = (__u8) entity_type;
538 con->peer_name.num = cpu_to_le64(entity_num);
539
540 mutex_init(&con->mutex);
541 INIT_LIST_HEAD(&con->out_queue);
542 INIT_LIST_HEAD(&con->out_sent);
543 INIT_DELAYED_WORK(&con->work, con_work);
544
545 set_bit(CLOSED, &con->state);
546 }
547 EXPORT_SYMBOL(ceph_con_init);
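/*
 * Illustrative caller (hypothetical names; the real users are the
 * mon/osd/mds clients):
 *
 *	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &client->msgr,
 *		      CEPH_ENTITY_TYPE_OSD, osd->o_osd);
 *	ceph_con_open(&osd->o_con, &osd_addr);
 *
 * con_init() leaves the connection CLOSED; con_open() supplies the
 * peer address and queues work to start connecting.
 */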
548
549
550 /*
551 * We maintain a global counter to order connection attempts. Get
552 * a unique seq greater than @gt.
553 */
554 static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
555 {
556 u32 ret;
557
558 spin_lock(&msgr->global_seq_lock);
559 if (msgr->global_seq < gt)
560 msgr->global_seq = gt;
561 ret = ++msgr->global_seq;
562 spin_unlock(&msgr->global_seq_lock);
563 return ret;
564 }
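/*
 * Example: with msgr->global_seq == 5, get_global_seq(msgr, 0) returns
 * 6, while get_global_seq(msgr, 10) first raises the counter to 10 and
 * returns 11 -- the result always exceeds @gt.
 */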
565
566 static void con_out_kvec_reset(struct ceph_connection *con)
567 {
568 con->out_kvec_left = 0;
569 con->out_kvec_bytes = 0;
570 con->out_kvec_cur = &con->out_kvec[0];
571 }
572
573 static void con_out_kvec_add(struct ceph_connection *con,
574 size_t size, void *data)
575 {
576 int index;
577
578 index = con->out_kvec_left;
579 BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
580
581 con->out_kvec[index].iov_len = size;
582 con->out_kvec[index].iov_base = data;
583 con->out_kvec_left++;
584 con->out_kvec_bytes += size;
585 }
586
587 /*
588 * Prepare footer for currently outgoing message, and finish things
589 * off. Assumes out_kvec* are already valid.. we just add on to the end.
590 */
591 static void prepare_write_message_footer(struct ceph_connection *con)
592 {
593 struct ceph_msg *m = con->out_msg;
594 int v = con->out_kvec_left;
595
596 dout("prepare_write_message_footer %p\n", con);
597 con->out_kvec_is_msg = true;
598 con->out_kvec[v].iov_base = &m->footer;
599 con->out_kvec[v].iov_len = sizeof(m->footer);
600 con->out_kvec_bytes += sizeof(m->footer);
601 con->out_kvec_left++;
602 con->out_more = m->more_to_follow;
603 con->out_msg_done = true;
604 }
605
606 /*
607 * Prepare headers for the next outgoing message.
608 */
609 static void prepare_write_message(struct ceph_connection *con)
610 {
611 struct ceph_msg *m;
612 u32 crc;
613
614 con_out_kvec_reset(con);
615 con->out_kvec_is_msg = true;
616 con->out_msg_done = false;
617
618 /* Sneak an ack in there first? If we can get it into the same
619 * TCP packet that's a good thing. */
620 if (con->in_seq > con->in_seq_acked) {
621 con->in_seq_acked = con->in_seq;
622 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
623 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
624 con_out_kvec_add(con, sizeof (con->out_temp_ack),
625 &con->out_temp_ack);
626 }
627
628 m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
629 con->out_msg = m;
630
631 /* put message on sent list */
632 ceph_msg_get(m);
633 list_move_tail(&m->list_head, &con->out_sent);
634
635 /*
636 * only assign outgoing seq # if we haven't sent this message
637 * yet. if it is requeued, resend with its original seq.
638 */
639 if (m->needs_out_seq) {
640 m->hdr.seq = cpu_to_le64(++con->out_seq);
641 m->needs_out_seq = false;
642 }
643
644 dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
645 m, con->out_seq, le16_to_cpu(m->hdr.type),
646 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
647 le32_to_cpu(m->hdr.data_len),
648 m->nr_pages);
649 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
650
651 /* tag + hdr + front + middle */
652 con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
653 con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
654 con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
655
656 if (m->middle)
657 con_out_kvec_add(con, m->middle->vec.iov_len,
658 m->middle->vec.iov_base);
659
660 /* fill in crc (except data pages), footer */
661 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
662 con->out_msg->hdr.crc = cpu_to_le32(crc);
663 con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
664
665 crc = crc32c(0, m->front.iov_base, m->front.iov_len);
666 con->out_msg->footer.front_crc = cpu_to_le32(crc);
667 if (m->middle) {
668 crc = crc32c(0, m->middle->vec.iov_base,
669 m->middle->vec.iov_len);
670 con->out_msg->footer.middle_crc = cpu_to_le32(crc);
671 } else
672 con->out_msg->footer.middle_crc = 0;
673 con->out_msg->footer.data_crc = 0;
674 dout("prepare_write_message front_crc %u data_crc %u\n",
675 le32_to_cpu(con->out_msg->footer.front_crc),
676 le32_to_cpu(con->out_msg->footer.middle_crc));
677
678 /* is there a data payload? */
679 if (le32_to_cpu(m->hdr.data_len) > 0) {
680 /* initialize page iterator */
681 con->out_msg_pos.page = 0;
682 if (m->pages)
683 con->out_msg_pos.page_pos = m->page_alignment;
684 else
685 con->out_msg_pos.page_pos = 0;
686 con->out_msg_pos.data_pos = 0;
687 con->out_msg_pos.did_page_crc = false;
688 con->out_more = 1; /* data + footer will follow */
689 } else {
690 /* no, queue up footer too and be done */
691 prepare_write_message_footer(con);
692 }
693
694 set_bit(WRITE_PENDING, &con->flags);
695 }
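/*
 * Worked example of the ack piggyback above: with in_seq == 7 and
 * in_seq_acked == 5, the kvec chain for this write becomes
 *
 *	[tag_ack][le64 7][tag_msg][hdr][front][middle?]
 *
 * so the ack rides in the same TCP segment as the outgoing message.
 */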
696
697 /*
698 * Prepare an ack.
699 */
700 static void prepare_write_ack(struct ceph_connection *con)
701 {
702 dout("prepare_write_ack %p %llu -> %llu\n", con,
703 con->in_seq_acked, con->in_seq);
704 con->in_seq_acked = con->in_seq;
705
706 con_out_kvec_reset(con);
707
708 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
709
710 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
711 con_out_kvec_add(con, sizeof (con->out_temp_ack),
712 &con->out_temp_ack);
713
714 con->out_more = 1; /* more will follow.. eventually.. */
715 set_bit(WRITE_PENDING, &con->flags);
716 }
717
718 /*
719 * Prepare to write keepalive byte.
720 */
721 static void prepare_write_keepalive(struct ceph_connection *con)
722 {
723 dout("prepare_write_keepalive %p\n", con);
724 con_out_kvec_reset(con);
725 con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
726 set_bit(WRITE_PENDING, &con->flags);
727 }
728
729 /*
730 * Connection negotiation.
731 */
732
733 static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
734 int *auth_proto)
735 {
736 struct ceph_auth_handshake *auth;
737
738 if (!con->ops->get_authorizer) {
739 con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
740 con->out_connect.authorizer_len = 0;
741
742 return NULL;
743 }
744
745 /* Can't hold the mutex while getting authorizer */
746
747 mutex_unlock(&con->mutex);
748
749 auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
750
751 mutex_lock(&con->mutex);
752
753 if (IS_ERR(auth))
754 return auth;
755 if (test_bit(CLOSED, &con->state) || test_bit(OPENING, &con->state))
756 return ERR_PTR(-EAGAIN);
757
758 con->auth_reply_buf = auth->authorizer_reply_buf;
759 con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
760
761
762 return auth;
763 }
764
765 /*
766 * We connected to a peer and are saying hello.
767 */
768 static void prepare_write_banner(struct ceph_connection *con)
769 {
770 con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
771 con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
772 &con->msgr->my_enc_addr);
773
774 con->out_more = 0;
775 set_bit(WRITE_PENDING, &con->flags);
776 }
777
778 static int prepare_write_connect(struct ceph_connection *con)
779 {
780 unsigned global_seq = get_global_seq(con->msgr, 0);
781 int proto;
782 int auth_proto;
783 struct ceph_auth_handshake *auth;
784
785 switch (con->peer_name.type) {
786 case CEPH_ENTITY_TYPE_MON:
787 proto = CEPH_MONC_PROTOCOL;
788 break;
789 case CEPH_ENTITY_TYPE_OSD:
790 proto = CEPH_OSDC_PROTOCOL;
791 break;
792 case CEPH_ENTITY_TYPE_MDS:
793 proto = CEPH_MDSC_PROTOCOL;
794 break;
795 default:
796 BUG();
797 }
798
799 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
800 con->connect_seq, global_seq, proto);
801
802 con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
803 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
804 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
805 con->out_connect.global_seq = cpu_to_le32(global_seq);
806 con->out_connect.protocol_version = cpu_to_le32(proto);
807 con->out_connect.flags = 0;
808
809 auth_proto = CEPH_AUTH_UNKNOWN;
810 auth = get_connect_authorizer(con, &auth_proto);
811 if (IS_ERR(auth))
812 return PTR_ERR(auth);
813
814 con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
815 con->out_connect.authorizer_len = auth ?
816 cpu_to_le32(auth->authorizer_buf_len) : 0;
817
818 con_out_kvec_add(con, sizeof (con->out_connect),
819 &con->out_connect);
820 if (auth && auth->authorizer_buf_len)
821 con_out_kvec_add(con, auth->authorizer_buf_len,
822 auth->authorizer_buf);
823
824 con->out_more = 0;
825 set_bit(WRITE_PENDING, &con->flags);
826
827 return 0;
828 }
829
830 /*
831 * write as much of pending kvecs to the socket as we can.
832 * 1 -> done
833 * 0 -> socket full, but more to do
834 * <0 -> error
835 */
836 static int write_partial_kvec(struct ceph_connection *con)
837 {
838 int ret;
839
840 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
841 while (con->out_kvec_bytes > 0) {
842 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
843 con->out_kvec_left, con->out_kvec_bytes,
844 con->out_more);
845 if (ret <= 0)
846 goto out;
847 con->out_kvec_bytes -= ret;
848 if (con->out_kvec_bytes == 0)
849 break; /* done */
850
851 /* account for full iov entries consumed */
852 while (ret >= con->out_kvec_cur->iov_len) {
853 BUG_ON(!con->out_kvec_left);
854 ret -= con->out_kvec_cur->iov_len;
855 con->out_kvec_cur++;
856 con->out_kvec_left--;
857 }
858 /* and for a partially-consumed entry */
859 if (ret) {
860 con->out_kvec_cur->iov_len -= ret;
861 con->out_kvec_cur->iov_base += ret;
862 }
863 }
864 con->out_kvec_left = 0;
865 con->out_kvec_is_msg = false;
866 ret = 1;
867 out:
868 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
869 con->out_kvec_bytes, con->out_kvec_left, ret);
870 return ret; /* done! */
871 }
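/*
 * Example of the partial-write accounting above: with queued kvecs of
 * 10 and 20 bytes and sendmsg() returning 15, the loop retires the
 * 10-byte entry (out_kvec_cur++, out_kvec_left--) and advances the
 * second by 5 bytes (iov_base += 5, iov_len -= 5), leaving
 * out_kvec_bytes == 15 for the next pass.
 */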
872
873 #ifdef CONFIG_BLOCK
874 static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
875 {
876 if (!bio) {
877 *iter = NULL;
878 *seg = 0;
879 return;
880 }
881 *iter = bio;
882 *seg = bio->bi_idx;
883 }
884
885 static void iter_bio_next(struct bio **bio_iter, int *seg)
886 {
887 if (*bio_iter == NULL)
888 return;
889
890 BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
891
892 (*seg)++;
893 if (*seg == (*bio_iter)->bi_vcnt)
894 init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
895 }
896 #endif
897
898 /*
899 * Write as much message data payload as we can. If we finish, queue
900 * up the footer.
901 * 1 -> done, footer is now queued in out_kvec[].
902 * 0 -> socket full, but more to do
903 * <0 -> error
904 */
905 static int write_partial_msg_pages(struct ceph_connection *con)
906 {
907 struct ceph_msg *msg = con->out_msg;
908 unsigned data_len = le32_to_cpu(msg->hdr.data_len);
909 size_t len;
910 bool do_datacrc = !con->msgr->nocrc;
911 int ret;
912 int total_max_write;
913 int in_trail = 0;
914 size_t trail_len = (msg->trail ? msg->trail->length : 0);
915
916 dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
917 con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
918 con->out_msg_pos.page_pos);
919
920 #ifdef CONFIG_BLOCK
921 if (msg->bio && !msg->bio_iter)
922 init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
923 #endif
924
925 while (data_len > con->out_msg_pos.data_pos) {
926 struct page *page = NULL;
927 int max_write = PAGE_SIZE;
928 int bio_offset = 0;
929
930 total_max_write = data_len - trail_len -
931 con->out_msg_pos.data_pos;
932
933 /*
934 * if we are calculating the data crc (the default), we need
935 * to map the page. if our pages[] has been revoked, use the
936 * zero page.
937 */
938
939 /* have we reached the trail part of the data? */
940 if (con->out_msg_pos.data_pos >= data_len - trail_len) {
941 in_trail = 1;
942
943 total_max_write = data_len - con->out_msg_pos.data_pos;
944
945 page = list_first_entry(&msg->trail->head,
946 struct page, lru);
947 max_write = PAGE_SIZE;
948 } else if (msg->pages) {
949 page = msg->pages[con->out_msg_pos.page];
950 } else if (msg->pagelist) {
951 page = list_first_entry(&msg->pagelist->head,
952 struct page, lru);
953 #ifdef CONFIG_BLOCK
954 } else if (msg->bio) {
955 struct bio_vec *bv;
956
957 bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
958 page = bv->bv_page;
959 bio_offset = bv->bv_offset;
960 max_write = bv->bv_len;
961 #endif
962 } else {
963 page = zero_page;
964 }
965 len = min_t(int, max_write - con->out_msg_pos.page_pos,
966 total_max_write);
967
968 if (do_datacrc && !con->out_msg_pos.did_page_crc) {
969 void *base;
970 u32 crc;
971 u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
972 char *kaddr;
973
974 kaddr = kmap(page);
975 BUG_ON(kaddr == NULL);
976 base = kaddr + con->out_msg_pos.page_pos + bio_offset;
977 crc = crc32c(tmpcrc, base, len);
978 con->out_msg->footer.data_crc = cpu_to_le32(crc);
979 con->out_msg_pos.did_page_crc = true;
980 }
981 ret = ceph_tcp_sendpage(con->sock, page,
982 con->out_msg_pos.page_pos + bio_offset,
983 len, 1);
984
985 if (do_datacrc)
986 kunmap(page);
987
988 if (ret <= 0)
989 goto out;
990
991 con->out_msg_pos.data_pos += ret;
992 con->out_msg_pos.page_pos += ret;
993 if (ret == len) {
994 con->out_msg_pos.page_pos = 0;
995 con->out_msg_pos.page++;
996 con->out_msg_pos.did_page_crc = false;
997 if (in_trail)
998 list_move_tail(&page->lru,
999 &msg->trail->head);
1000 else if (msg->pagelist)
1001 list_move_tail(&page->lru,
1002 &msg->pagelist->head);
1003 #ifdef CONFIG_BLOCK
1004 else if (msg->bio)
1005 iter_bio_next(&msg->bio_iter, &msg->bio_seg);
1006 #endif
1007 }
1008 }
1009
1010 dout("write_partial_msg_pages %p msg %p done\n", con, msg);
1011
1012 /* prepare and queue up footer, too */
1013 if (!do_datacrc)
1014 con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
1015 con_out_kvec_reset(con);
1016 prepare_write_message_footer(con);
1017 ret = 1;
1018 out:
1019 return ret;
1020 }
1021
1022 /*
1023 * write some zeros
1024 */
1025 static int write_partial_skip(struct ceph_connection *con)
1026 {
1027 int ret;
1028
1029 while (con->out_skip > 0) {
1030 size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
1031
1032 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, 1);
1033 if (ret <= 0)
1034 goto out;
1035 con->out_skip -= ret;
1036 }
1037 ret = 1;
1038 out:
1039 return ret;
1040 }
1041
1042 /*
1043 * Prepare to read connection handshake, or an ack.
1044 */
1045 static void prepare_read_banner(struct ceph_connection *con)
1046 {
1047 dout("prepare_read_banner %p\n", con);
1048 con->in_base_pos = 0;
1049 }
1050
1051 static void prepare_read_connect(struct ceph_connection *con)
1052 {
1053 dout("prepare_read_connect %p\n", con);
1054 con->in_base_pos = 0;
1055 }
1056
1057 static void prepare_read_ack(struct ceph_connection *con)
1058 {
1059 dout("prepare_read_ack %p\n", con);
1060 con->in_base_pos = 0;
1061 }
1062
1063 static void prepare_read_tag(struct ceph_connection *con)
1064 {
1065 dout("prepare_read_tag %p\n", con);
1066 con->in_base_pos = 0;
1067 con->in_tag = CEPH_MSGR_TAG_READY;
1068 }
1069
1070 /*
1071 * Prepare to read a message.
1072 */
1073 static int prepare_read_message(struct ceph_connection *con)
1074 {
1075 dout("prepare_read_message %p\n", con);
1076 BUG_ON(con->in_msg != NULL);
1077 con->in_base_pos = 0;
1078 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
1079 return 0;
1080 }
1081
1082
1083 static int read_partial(struct ceph_connection *con,
1084 int end, int size, void *object)
1085 {
1086 while (con->in_base_pos < end) {
1087 int left = end - con->in_base_pos;
1088 int have = size - left;
1089 int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
1090 if (ret <= 0)
1091 return ret;
1092 con->in_base_pos += ret;
1093 }
1094 return 1;
1095 }
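/*
 * Note on the arithmetic above: con->in_base_pos counts bytes consumed
 * across all sections read so far, @end is the cumulative offset at
 * which @object ends, and @size is the size of @object alone, so
 * "have = size - left" converts the global position into an offset
 * within @object. read_partial_banner() below chains three sections
 * this way.
 */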
1096
1097
1098 /*
1099 * Read all or part of the connect-side handshake on a new connection
1100 */
1101 static int read_partial_banner(struct ceph_connection *con)
1102 {
1103 int size;
1104 int end;
1105 int ret;
1106
1107 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
1108
1109 /* peer's banner */
1110 size = strlen(CEPH_BANNER);
1111 end = size;
1112 ret = read_partial(con, end, size, con->in_banner);
1113 if (ret <= 0)
1114 goto out;
1115
1116 size = sizeof (con->actual_peer_addr);
1117 end += size;
1118 ret = read_partial(con, end, size, &con->actual_peer_addr);
1119 if (ret <= 0)
1120 goto out;
1121
1122 size = sizeof (con->peer_addr_for_me);
1123 end += size;
1124 ret = read_partial(con, end, size, &con->peer_addr_for_me);
1125 if (ret <= 0)
1126 goto out;
1127
1128 out:
1129 return ret;
1130 }
1131
1132 static int read_partial_connect(struct ceph_connection *con)
1133 {
1134 int size;
1135 int end;
1136 int ret;
1137
1138 dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
1139
1140 size = sizeof (con->in_reply);
1141 end = size;
1142 ret = read_partial(con, end, size, &con->in_reply);
1143 if (ret <= 0)
1144 goto out;
1145
1146 size = le32_to_cpu(con->in_reply.authorizer_len);
1147 end += size;
1148 ret = read_partial(con, end, size, con->auth_reply_buf);
1149 if (ret <= 0)
1150 goto out;
1151
1152 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1153 con, (int)con->in_reply.tag,
1154 le32_to_cpu(con->in_reply.connect_seq),
1155 le32_to_cpu(con->in_reply.global_seq));
1156 out:
1157 return ret;
1158
1159 }
1160
1161 /*
1162 * Verify the hello banner looks okay.
1163 */
1164 static int verify_hello(struct ceph_connection *con)
1165 {
1166 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
1167 pr_err("connect to %s got bad banner\n",
1168 ceph_pr_addr(&con->peer_addr.in_addr));
1169 con->error_msg = "protocol error, bad banner";
1170 return -1;
1171 }
1172 return 0;
1173 }
1174
1175 static bool addr_is_blank(struct sockaddr_storage *ss)
1176 {
1177 switch (ss->ss_family) {
1178 case AF_INET:
1179 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
1180 case AF_INET6:
1181 return
1182 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
1183 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
1184 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
1185 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
1186 }
1187 return false;
1188 }
1189
1190 static int addr_port(struct sockaddr_storage *ss)
1191 {
1192 switch (ss->ss_family) {
1193 case AF_INET:
1194 return ntohs(((struct sockaddr_in *)ss)->sin_port);
1195 case AF_INET6:
1196 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
1197 }
1198 return 0;
1199 }
1200
1201 static void addr_set_port(struct sockaddr_storage *ss, int p)
1202 {
1203 switch (ss->ss_family) {
1204 case AF_INET:
1205 ((struct sockaddr_in *)ss)->sin_port = htons(p);
1206 break;
1207 case AF_INET6:
1208 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
1209 break;
1210 }
1211 }
1212
1213 /*
1214 * Unlike other *_pton function semantics, zero indicates success.
1215 */
1216 static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
1217 char delim, const char **ipend)
1218 {
1219 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
1220 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
1221
1222 memset(ss, 0, sizeof(*ss));
1223
1224 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
1225 ss->ss_family = AF_INET;
1226 return 0;
1227 }
1228
1229 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
1230 ss->ss_family = AF_INET6;
1231 return 0;
1232 }
1233
1234 return -EINVAL;
1235 }
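/*
 * Example (a sketch): parsing the address part of "10.1.2.3:6789,"
 * with delim == ',' fills in an AF_INET sockaddr and leaves *ipend
 * pointing at the ':' so the caller (ceph_parse_ips() below) can pick
 * up the port. Note again that 0 means success, unlike the
 * in4_pton()/in6_pton() convention.
 */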
1236
1237 /*
1238 * Extract hostname string and resolve using kernel DNS facility.
1239 */
1240 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1241 static int ceph_dns_resolve_name(const char *name, size_t namelen,
1242 struct sockaddr_storage *ss, char delim, const char **ipend)
1243 {
1244 const char *end, *delim_p;
1245 char *colon_p, *ip_addr = NULL;
1246 int ip_len, ret;
1247
1248 /*
1249 * The end of the hostname occurs immediately preceding the delimiter or
1250 * the port marker (':') where the delimiter takes precedence.
1251 */
1252 delim_p = memchr(name, delim, namelen);
1253 colon_p = memchr(name, ':', namelen);
1254
1255 if (delim_p && colon_p)
1256 end = delim_p < colon_p ? delim_p : colon_p;
1257 else if (!delim_p && colon_p)
1258 end = colon_p;
1259 else {
1260 end = delim_p;
1261 if (!end) /* case: hostname:/ */
1262 end = name + namelen;
1263 }
1264
1265 if (end <= name)
1266 return -EINVAL;
1267
1268 /* do dns_resolve upcall */
1269 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
1270 if (ip_len > 0)
1271 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
1272 else
1273 ret = -ESRCH;
1274
1275 kfree(ip_addr);
1276
1277 *ipend = end;
1278
1279 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1280 ret, ret ? "failed" : ceph_pr_addr(ss));
1281
1282 return ret;
1283 }
1284 #else
1285 static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1286 struct sockaddr_storage *ss, char delim, const char **ipend)
1287 {
1288 return -EINVAL;
1289 }
1290 #endif
1291
1292 /*
1293 * Parse a server name (IP or hostname). If a valid IP address is not found
1294 * then try to extract a hostname to resolve using userspace DNS upcall.
1295 */
1296 static int ceph_parse_server_name(const char *name, size_t namelen,
1297 struct sockaddr_storage *ss, char delim, const char **ipend)
1298 {
1299 int ret;
1300
1301 ret = ceph_pton(name, namelen, ss, delim, ipend);
1302 if (ret)
1303 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);
1304
1305 return ret;
1306 }
1307
1308 /*
1309 * Parse an ip[:port] list into an addr array. Use the default
1310 * monitor port if a port isn't specified.
1311 */
1312 int ceph_parse_ips(const char *c, const char *end,
1313 struct ceph_entity_addr *addr,
1314 int max_count, int *count)
1315 {
1316 int i, ret = -EINVAL;
1317 const char *p = c;
1318
1319 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1320 for (i = 0; i < max_count; i++) {
1321 const char *ipend;
1322 struct sockaddr_storage *ss = &addr[i].in_addr;
1323 int port;
1324 char delim = ',';
1325
1326 if (*p == '[') {
1327 delim = ']';
1328 p++;
1329 }
1330
1331 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
1332 if (ret)
1333 goto bad;
1334 ret = -EINVAL;
1335
1336 p = ipend;
1337
1338 if (delim == ']') {
1339 if (*p != ']') {
1340 dout("missing matching ']'\n");
1341 goto bad;
1342 }
1343 p++;
1344 }
1345
1346 /* port? */
1347 if (p < end && *p == ':') {
1348 port = 0;
1349 p++;
1350 while (p < end && *p >= '0' && *p <= '9') {
1351 port = (port * 10) + (*p - '0');
1352 p++;
1353 }
1354 if (port > 65535 || port == 0)
1355 goto bad;
1356 } else {
1357 port = CEPH_MON_PORT;
1358 }
1359
1360 addr_set_port(ss, port);
1361
1362 dout("parse_ips got %s\n", ceph_pr_addr(ss));
1363
1364 if (p == end)
1365 break;
1366 if (*p != ',')
1367 goto bad;
1368 p++;
1369 }
1370
1371 if (p != end)
1372 goto bad;
1373
1374 if (count)
1375 *count = i + 1;
1376 return 0;
1377
1378 bad:
1379 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
1380 return ret;
1381 }
1382 EXPORT_SYMBOL(ceph_parse_ips);
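/*
 * Example inputs accepted above: "1.2.3.4:6789,[::1]:6790" yields two
 * entries, and a bare "1.2.3.4" gets the default CEPH_MON_PORT.
 */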
1383
1384 static int process_banner(struct ceph_connection *con)
1385 {
1386 dout("process_banner on %p\n", con);
1387
1388 if (verify_hello(con) < 0)
1389 return -1;
1390
1391 ceph_decode_addr(&con->actual_peer_addr);
1392 ceph_decode_addr(&con->peer_addr_for_me);
1393
1394 /*
1395 * Make sure the other end is who we wanted. note that the other
1396 * end may not yet know their ip address, so if it's 0.0.0.0, give
1397 * them the benefit of the doubt.
1398 */
1399 if (memcmp(&con->peer_addr, &con->actual_peer_addr,
1400 sizeof(con->peer_addr)) != 0 &&
1401 !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
1402 con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
1403 pr_warning("wrong peer, want %s/%d, got %s/%d\n",
1404 ceph_pr_addr(&con->peer_addr.in_addr),
1405 (int)le32_to_cpu(con->peer_addr.nonce),
1406 ceph_pr_addr(&con->actual_peer_addr.in_addr),
1407 (int)le32_to_cpu(con->actual_peer_addr.nonce));
1408 con->error_msg = "wrong peer at address";
1409 return -1;
1410 }
1411
1412 /*
1413 * did we learn our address?
1414 */
1415 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
1416 int port = addr_port(&con->msgr->inst.addr.in_addr);
1417
1418 memcpy(&con->msgr->inst.addr.in_addr,
1419 &con->peer_addr_for_me.in_addr,
1420 sizeof(con->peer_addr_for_me.in_addr));
1421 addr_set_port(&con->msgr->inst.addr.in_addr, port);
1422 encode_my_addr(con->msgr);
1423 dout("process_banner learned my addr is %s\n",
1424 ceph_pr_addr(&con->msgr->inst.addr.in_addr));
1425 }
1426
1427 set_bit(NEGOTIATING, &con->state);
1428 prepare_read_connect(con);
1429 return 0;
1430 }
1431
1432 static void fail_protocol(struct ceph_connection *con)
1433 {
1434 reset_connection(con);
1435 set_bit(CLOSED, &con->state); /* in case there's queued work */
1436 }
1437
1438 static int process_connect(struct ceph_connection *con)
1439 {
1440 u64 sup_feat = con->msgr->supported_features;
1441 u64 req_feat = con->msgr->required_features;
1442 u64 server_feat = le64_to_cpu(con->in_reply.features);
1443 int ret;
1444
1445 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
1446
1447 switch (con->in_reply.tag) {
1448 case CEPH_MSGR_TAG_FEATURES:
1449 pr_err("%s%lld %s feature set mismatch,"
1450 " my %llx < server's %llx, missing %llx\n",
1451 ENTITY_NAME(con->peer_name),
1452 ceph_pr_addr(&con->peer_addr.in_addr),
1453 sup_feat, server_feat, server_feat & ~sup_feat);
1454 con->error_msg = "missing required protocol features";
1455 fail_protocol(con);
1456 return -1;
1457
1458 case CEPH_MSGR_TAG_BADPROTOVER:
1459 pr_err("%s%lld %s protocol version mismatch,"
1460 " my %d != server's %d\n",
1461 ENTITY_NAME(con->peer_name),
1462 ceph_pr_addr(&con->peer_addr.in_addr),
1463 le32_to_cpu(con->out_connect.protocol_version),
1464 le32_to_cpu(con->in_reply.protocol_version));
1465 con->error_msg = "protocol version mismatch";
1466 fail_protocol(con);
1467 return -1;
1468
1469 case CEPH_MSGR_TAG_BADAUTHORIZER:
1470 con->auth_retry++;
1471 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
1472 con->auth_retry);
1473 if (con->auth_retry == 2) {
1474 con->error_msg = "connect authorization failure";
1475 return -1;
1476 }
1477 con->auth_retry = 1;
1478 con_out_kvec_reset(con);
1479 ret = prepare_write_connect(con);
1480 if (ret < 0)
1481 return ret;
1482 prepare_read_connect(con);
1483 break;
1484
1485 case CEPH_MSGR_TAG_RESETSESSION:
1486 /*
1487 * If we connected with a large connect_seq but the peer
1488 * has no record of a session with us (no connection, or
1489 * connect_seq == 0), they will send RESETSESSION to indicate
1490 * that they must have reset their session, and may have
1491 * dropped messages.
1492 */
1493 dout("process_connect got RESET peer seq %u\n",
1494 le32_to_cpu(con->in_connect.connect_seq));
1495 pr_err("%s%lld %s connection reset\n",
1496 ENTITY_NAME(con->peer_name),
1497 ceph_pr_addr(&con->peer_addr.in_addr));
1498 reset_connection(con);
1499 con_out_kvec_reset(con);
1500 ret = prepare_write_connect(con);
1501 if (ret < 0)
1502 return ret;
1503 prepare_read_connect(con);
1504
1505 /* Tell ceph about it. */
1506 mutex_unlock(&con->mutex);
1507 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
1508 if (con->ops->peer_reset)
1509 con->ops->peer_reset(con);
1510 mutex_lock(&con->mutex);
1511 if (test_bit(CLOSED, &con->state) ||
1512 test_bit(OPENING, &con->state))
1513 return -EAGAIN;
1514 break;
1515
1516 case CEPH_MSGR_TAG_RETRY_SESSION:
1517 /*
1518 * If we sent a smaller connect_seq than the peer has, try
1519 * again with a larger value.
1520 */
1521 dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
1522 le32_to_cpu(con->out_connect.connect_seq),
1523 le32_to_cpu(con->in_connect.connect_seq));
1524 con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
1525 con_out_kvec_reset(con);
1526 ret = prepare_write_connect(con);
1527 if (ret < 0)
1528 return ret;
1529 prepare_read_connect(con);
1530 break;
1531
1532 case CEPH_MSGR_TAG_RETRY_GLOBAL:
1533 /*
1534 * If we sent a smaller global_seq than the peer has, try
1535 * again with a larger value.
1536 */
1537 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
1538 con->peer_global_seq,
1539 le32_to_cpu(con->in_connect.global_seq));
1540 get_global_seq(con->msgr,
1541 le32_to_cpu(con->in_connect.global_seq));
1542 con_out_kvec_reset(con);
1543 ret = prepare_write_connect(con);
1544 if (ret < 0)
1545 return ret;
1546 prepare_read_connect(con);
1547 break;
1548
1549 case CEPH_MSGR_TAG_READY:
1550 if (req_feat & ~server_feat) {
1551 pr_err("%s%lld %s protocol feature mismatch,"
1552 " my required %llx > server's %llx, need %llx\n",
1553 ENTITY_NAME(con->peer_name),
1554 ceph_pr_addr(&con->peer_addr.in_addr),
1555 req_feat, server_feat, req_feat & ~server_feat);
1556 con->error_msg = "missing required protocol features";
1557 fail_protocol(con);
1558 return -1;
1559 }
1560 clear_bit(CONNECTING, &con->state);
1561 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
1562 con->connect_seq++;
1563 con->peer_features = server_feat;
1564 dout("process_connect got READY gseq %d cseq %d (%d)\n",
1565 con->peer_global_seq,
1566 le32_to_cpu(con->in_reply.connect_seq),
1567 con->connect_seq);
1568 WARN_ON(con->connect_seq !=
1569 le32_to_cpu(con->in_reply.connect_seq));
1570
1571 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
1572 set_bit(LOSSYTX, &con->flags);
1573
1574 prepare_read_tag(con);
1575 break;
1576
1577 case CEPH_MSGR_TAG_WAIT:
1578 /*
1579 * If there is a connection race (we are opening
1580 * connections to each other), one of us may just have
1581 * to WAIT. This shouldn't happen if we are the
1582 * client.
1583 */
1584 pr_err("process_connect got WAIT as client\n");
1585 con->error_msg = "protocol error, got WAIT as client";
1586 return -1;
1587
1588 default:
1589 pr_err("connect protocol error, will retry\n");
1590 con->error_msg = "protocol error, garbage tag during connect";
1591 return -1;
1592 }
1593 return 0;
1594 }
1595
1596
1597 /*
1598 * read (part of) an ack
1599 */
1600 static int read_partial_ack(struct ceph_connection *con)
1601 {
1602 int size = sizeof (con->in_temp_ack);
1603 int end = size;
1604
1605 return read_partial(con, end, size, &con->in_temp_ack);
1606 }
1607
1608
1609 /*
1610 * We can finally discard anything that's been acked.
1611 */
1612 static void process_ack(struct ceph_connection *con)
1613 {
1614 struct ceph_msg *m;
1615 u64 ack = le64_to_cpu(con->in_temp_ack);
1616 u64 seq;
1617
1618 while (!list_empty(&con->out_sent)) {
1619 m = list_first_entry(&con->out_sent, struct ceph_msg,
1620 list_head);
1621 seq = le64_to_cpu(m->hdr.seq);
1622 if (seq > ack)
1623 break;
1624 dout("got ack for seq %llu type %d at %p\n", seq,
1625 le16_to_cpu(m->hdr.type), m);
1626 m->ack_stamp = jiffies;
1627 ceph_msg_remove(m);
1628 }
1629 prepare_read_tag(con);
1630 }
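/*
 * Example: if out_sent holds messages with seqs 3, 4 and 5 and the
 * peer acks seq 4, messages 3 and 4 are dropped; 5 stays on out_sent
 * in case it must be requeued after a fault.
 */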
1631
1632
1633
1634
1635 static int read_partial_message_section(struct ceph_connection *con,
1636 struct kvec *section,
1637 unsigned int sec_len, u32 *crc)
1638 {
1639 int ret, left;
1640
1641 BUG_ON(!section);
1642
1643 while (section->iov_len < sec_len) {
1644 BUG_ON(section->iov_base == NULL);
1645 left = sec_len - section->iov_len;
1646 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
1647 section->iov_len, left);
1648 if (ret <= 0)
1649 return ret;
1650 section->iov_len += ret;
1651 }
1652 if (section->iov_len == sec_len)
1653 *crc = crc32c(0, section->iov_base, section->iov_len);
1654
1655 return 1;
1656 }
1657
1658 static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
1659 struct ceph_msg_header *hdr,
1660 int *skip);
1661
1662
1663 static int read_partial_message_pages(struct ceph_connection *con,
1664 struct page **pages,
1665 unsigned data_len, bool do_datacrc)
1666 {
1667 void *p;
1668 int ret;
1669 int left;
1670
1671 left = min((int)(data_len - con->in_msg_pos.data_pos),
1672 (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
1673 /* (page) data */
1674 BUG_ON(pages == NULL);
1675 p = kmap(pages[con->in_msg_pos.page]);
1676 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
1677 left);
1678 if (ret > 0 && do_datacrc)
1679 con->in_data_crc =
1680 crc32c(con->in_data_crc,
1681 p + con->in_msg_pos.page_pos, ret);
1682 kunmap(pages[con->in_msg_pos.page]);
1683 if (ret <= 0)
1684 return ret;
1685 con->in_msg_pos.data_pos += ret;
1686 con->in_msg_pos.page_pos += ret;
1687 if (con->in_msg_pos.page_pos == PAGE_SIZE) {
1688 con->in_msg_pos.page_pos = 0;
1689 con->in_msg_pos.page++;
1690 }
1691
1692 return ret;
1693 }
1694
1695 #ifdef CONFIG_BLOCK
1696 static int read_partial_message_bio(struct ceph_connection *con,
1697 struct bio **bio_iter, int *bio_seg,
1698 unsigned data_len, bool do_datacrc)
1699 {
1700 struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
1701 void *p;
1702 int ret, left;
1703
1704 if (IS_ERR(bv))
1705 return PTR_ERR(bv);
1706
1707 left = min((int)(data_len - con->in_msg_pos.data_pos),
1708 (int)(bv->bv_len - con->in_msg_pos.page_pos));
1709
1710 p = kmap(bv->bv_page) + bv->bv_offset;
1711
1712 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
1713 left);
1714 if (ret > 0 && do_datacrc)
1715 con->in_data_crc =
1716 crc32c(con->in_data_crc,
1717 p + con->in_msg_pos.page_pos, ret);
1718 kunmap(bv->bv_page);
1719 if (ret <= 0)
1720 return ret;
1721 con->in_msg_pos.data_pos += ret;
1722 con->in_msg_pos.page_pos += ret;
1723 if (con->in_msg_pos.page_pos == bv->bv_len) {
1724 con->in_msg_pos.page_pos = 0;
1725 iter_bio_next(bio_iter, bio_seg);
1726 }
1727
1728 return ret;
1729 }
1730 #endif
1731
1732 /*
1733 * read (part of) a message.
1734 */
1735 static int read_partial_message(struct ceph_connection *con)
1736 {
1737 struct ceph_msg *m = con->in_msg;
1738 int size;
1739 int end;
1740 int ret;
1741 unsigned front_len, middle_len, data_len;
1742 bool do_datacrc = !con->msgr->nocrc;
1743 int skip;
1744 u64 seq;
1745 u32 crc;
1746
1747 dout("read_partial_message con %p msg %p\n", con, m);
1748
1749 /* header */
1750 size = sizeof (con->in_hdr);
1751 end = size;
1752 ret = read_partial(con, end, size, &con->in_hdr);
1753 if (ret <= 0)
1754 return ret;
1755
1756 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
1757 if (cpu_to_le32(crc) != con->in_hdr.crc) {
1758 pr_err("read_partial_message bad hdr "
1759 " crc %u != expected %u\n",
1760 crc, con->in_hdr.crc);
1761 return -EBADMSG;
1762 }
1763
1764 front_len = le32_to_cpu(con->in_hdr.front_len);
1765 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
1766 return -EIO;
1767 middle_len = le32_to_cpu(con->in_hdr.middle_len);
1768 if (middle_len > CEPH_MSG_MAX_DATA_LEN)
1769 return -EIO;
1770 data_len = le32_to_cpu(con->in_hdr.data_len);
1771 if (data_len > CEPH_MSG_MAX_DATA_LEN)
1772 return -EIO;
1773
1774 /* verify seq# */
1775 seq = le64_to_cpu(con->in_hdr.seq);
1776 if ((s64)seq - (s64)con->in_seq < 1) {
1777 pr_info("skipping %s%lld %s seq %lld expected %lld\n",
1778 ENTITY_NAME(con->peer_name),
1779 ceph_pr_addr(&con->peer_addr.in_addr),
1780 seq, con->in_seq + 1);
1781 con->in_base_pos = -front_len - middle_len - data_len -
1782 sizeof(m->footer);
1783 con->in_tag = CEPH_MSGR_TAG_READY;
1784 return 0;
1785 } else if ((s64)seq - (s64)con->in_seq > 1) {
1786 pr_err("read_partial_message bad seq %lld expected %lld\n",
1787 seq, con->in_seq + 1);
1788 con->error_msg = "bad message sequence # for incoming message";
1789 return -EBADMSG;
1790 }
1791
1792 /* allocate message? */
1793 if (!con->in_msg) {
1794 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
1795 con->in_hdr.front_len, con->in_hdr.data_len);
1796 skip = 0;
1797 con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
1798 if (skip) {
1799 /* skip this message */
1800 dout("alloc_msg said skip message\n");
1801 BUG_ON(con->in_msg);
1802 con->in_base_pos = -front_len - middle_len - data_len -
1803 sizeof(m->footer);
1804 con->in_tag = CEPH_MSGR_TAG_READY;
1805 con->in_seq++;
1806 return 0;
1807 }
1808 if (!con->in_msg) {
1809 con->error_msg =
1810 "error allocating memory for incoming message";
1811 return -ENOMEM;
1812 }
1813 m = con->in_msg;
1814 m->front.iov_len = 0; /* haven't read it yet */
1815 if (m->middle)
1816 m->middle->vec.iov_len = 0;
1817
1818 con->in_msg_pos.page = 0;
1819 if (m->pages)
1820 con->in_msg_pos.page_pos = m->page_alignment;
1821 else
1822 con->in_msg_pos.page_pos = 0;
1823 con->in_msg_pos.data_pos = 0;
1824 }
1825
1826 /* front */
1827 ret = read_partial_message_section(con, &m->front, front_len,
1828 &con->in_front_crc);
1829 if (ret <= 0)
1830 return ret;
1831
1832 /* middle */
1833 if (m->middle) {
1834 ret = read_partial_message_section(con, &m->middle->vec,
1835 middle_len,
1836 &con->in_middle_crc);
1837 if (ret <= 0)
1838 return ret;
1839 }
1840 #ifdef CONFIG_BLOCK
1841 if (m->bio && !m->bio_iter)
1842 init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
1843 #endif
1844
1845 /* (page) data */
1846 while (con->in_msg_pos.data_pos < data_len) {
1847 if (m->pages) {
1848 ret = read_partial_message_pages(con, m->pages,
1849 data_len, do_datacrc);
1850 if (ret <= 0)
1851 return ret;
1852 #ifdef CONFIG_BLOCK
1853 } else if (m->bio) {
1854
1855 ret = read_partial_message_bio(con,
1856 &m->bio_iter, &m->bio_seg,
1857 data_len, do_datacrc);
1858 if (ret <= 0)
1859 return ret;
1860 #endif
1861 } else {
1862 BUG();
1863 }
1864 }
1865
1866 /* footer */
1867 size = sizeof (m->footer);
1868 end += size;
1869 ret = read_partial(con, end, size, &m->footer);
1870 if (ret <= 0)
1871 return ret;
1872
1873 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
1874 m, front_len, m->footer.front_crc, middle_len,
1875 m->footer.middle_crc, data_len, m->footer.data_crc);
1876
1877 /* crc ok? */
1878 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
1879 pr_err("read_partial_message %p front crc %u != exp. %u\n",
1880 m, con->in_front_crc, m->footer.front_crc);
1881 return -EBADMSG;
1882 }
1883 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
1884 pr_err("read_partial_message %p middle crc %u != exp %u\n",
1885 m, con->in_middle_crc, m->footer.middle_crc);
1886 return -EBADMSG;
1887 }
1888 if (do_datacrc &&
1889 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
1890 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
1891 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
1892 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
1893 return -EBADMSG;
1894 }
1895
1896 return 1; /* done! */
1897 }
1898
1899 /*
1900 * Process message. This happens in the worker thread. The callback should
1901 * be careful not to do anything that waits on other incoming messages or it
1902 * may deadlock.
1903 */
1904 static void process_message(struct ceph_connection *con)
1905 {
1906 struct ceph_msg *msg;
1907
1908 msg = con->in_msg;
1909 con->in_msg = NULL;
1910
1911 /* if first message, set peer_name */
1912 if (con->peer_name.type == 0)
1913 con->peer_name = msg->hdr.src;
1914
1915 con->in_seq++;
1916 mutex_unlock(&con->mutex);
1917
1918 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
1919 msg, le64_to_cpu(msg->hdr.seq),
1920 ENTITY_NAME(msg->hdr.src),
1921 le16_to_cpu(msg->hdr.type),
1922 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1923 le32_to_cpu(msg->hdr.front_len),
1924 le32_to_cpu(msg->hdr.data_len),
1925 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
1926 con->ops->dispatch(con, msg);
1927
1928 mutex_lock(&con->mutex);
1929 prepare_read_tag(con);
1930 }
1931
1932
1933 /*
1934 * Write something to the socket. Called in a worker thread when the
1935 * socket appears to be writeable and we have something ready to send.
1936 */
1937 static int try_write(struct ceph_connection *con)
1938 {
1939 int ret = 1;
1940
1941 dout("try_write start %p state %lu nref %d\n", con, con->state,
1942 atomic_read(&con->nref));
1943
1944 more:
1945 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
1946
1947 /* open the socket first? */
1948 if (con->sock == NULL) {
1949 clear_bit(NEGOTIATING, &con->state);
1950 set_bit(CONNECTING, &con->state);
1951
1952 con_out_kvec_reset(con);
1953 prepare_write_banner(con);
1954 ret = prepare_write_connect(con);
1955 if (ret < 0)
1956 goto out;
1957 prepare_read_banner(con);
1958
1959 BUG_ON(con->in_msg);
1960 con->in_tag = CEPH_MSGR_TAG_READY;
1961 dout("try_write initiating connect on %p new state %lu\n",
1962 con, con->state);
1963 ret = ceph_tcp_connect(con);
1964 if (ret < 0) {
1965 con->error_msg = "connect error";
1966 goto out;
1967 }
1968 }
1969
1970 more_kvec:
1971 /* kvec data queued? */
1972 if (con->out_skip) {
1973 ret = write_partial_skip(con);
1974 if (ret <= 0)
1975 goto out;
1976 }
1977 if (con->out_kvec_left) {
1978 ret = write_partial_kvec(con);
1979 if (ret <= 0)
1980 goto out;
1981 }
1982
1983 /* msg pages? */
1984 if (con->out_msg) {
1985 if (con->out_msg_done) {
1986 ceph_msg_put(con->out_msg);
1987 con->out_msg = NULL; /* we're done with this one */
1988 goto do_next;
1989 }
1990
1991 ret = write_partial_msg_pages(con);
1992 if (ret == 1)
1993 goto more_kvec; /* we need to send the footer, too! */
1994 if (ret == 0)
1995 goto out;
1996 if (ret < 0) {
1997 dout("try_write write_partial_msg_pages err %d\n",
1998 ret);
1999 goto out;
2000 }
2001 }
2002
2003 do_next:
2004 if (!test_bit(CONNECTING, &con->state)) {
2005 /* is anything else pending? */
2006 if (!list_empty(&con->out_queue)) {
2007 prepare_write_message(con);
2008 goto more;
2009 }
2010 if (con->in_seq > con->in_seq_acked) {
2011 prepare_write_ack(con);
2012 goto more;
2013 }
2014 if (test_and_clear_bit(KEEPALIVE_PENDING, &con->flags)) {
2015 prepare_write_keepalive(con);
2016 goto more;
2017 }
2018 }
2019
2020 /* Nothing to do! */
2021 clear_bit(WRITE_PENDING, &con->flags);
2022 dout("try_write nothing else to write.\n");
2023 ret = 0;
2024 out:
2025 dout("try_write done on %p ret %d\n", con, ret);
2026 return ret;
2027 }
2028
2029
2030
2031 /*
2032 * Read what we can from the socket.
2033 */
2034 static int try_read(struct ceph_connection *con)
2035 {
2036 int ret = -1;
2037
2038 if (!con->sock)
2039 return 0;
2040
2041 if (test_bit(STANDBY, &con->state))
2042 return 0;
2043
2044 dout("try_read start on %p\n", con);
2045
2046 more:
2047 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
2048 con->in_base_pos);
2049
2050 /*
2051 * process_connect and process_message drop and re-take
2052  * con->mutex.  Make sure we handle a racing close or reopen.
2053 */
2054 if (test_bit(CLOSED, &con->state) ||
2055 test_bit(OPENING, &con->state)) {
2056 ret = -EAGAIN;
2057 goto out;
2058 }
2059
2060 if (test_bit(CONNECTING, &con->state)) {
2061 if (!test_bit(NEGOTIATING, &con->state)) {
2062 dout("try_read connecting\n");
2063 ret = read_partial_banner(con);
2064 if (ret <= 0)
2065 goto out;
2066 ret = process_banner(con);
2067 if (ret < 0)
2068 goto out;
2069 }
2070 ret = read_partial_connect(con);
2071 if (ret <= 0)
2072 goto out;
2073 ret = process_connect(con);
2074 if (ret < 0)
2075 goto out;
2076 goto more;
2077 }
2078
2079 if (con->in_base_pos < 0) {
2080 /*
2081 * skipping + discarding content.
2082 *
2083 * FIXME: there must be a better way to do this!
2084 */
2085 static char buf[SKIP_BUF_SIZE];
2086 int skip = min((int) sizeof (buf), -con->in_base_pos);
2087
2088 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
2089 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
2090 if (ret <= 0)
2091 goto out;
2092 con->in_base_pos += ret;
2093 if (con->in_base_pos)
2094 goto more;
2095 }
2096 if (con->in_tag == CEPH_MSGR_TAG_READY) {
2097 /*
2098 * what's next?
2099 */
2100 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
2101 if (ret <= 0)
2102 goto out;
2103 dout("try_read got tag %d\n", (int)con->in_tag);
2104 switch (con->in_tag) {
2105 case CEPH_MSGR_TAG_MSG:
2106 prepare_read_message(con);
2107 break;
2108 case CEPH_MSGR_TAG_ACK:
2109 prepare_read_ack(con);
2110 break;
2111 case CEPH_MSGR_TAG_CLOSE:
2112 set_bit(CLOSED, &con->state); /* fixme */
2113 goto out;
2114 default:
2115 goto bad_tag;
2116 }
2117 }
2118 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
2119 ret = read_partial_message(con);
2120 if (ret <= 0) {
2121 switch (ret) {
2122 case -EBADMSG:
2123 con->error_msg = "bad crc";
2124 ret = -EIO;
2125 break;
2126 case -EIO:
2127 con->error_msg = "io error";
2128 break;
2129 }
2130 goto out;
2131 }
2132 if (con->in_tag == CEPH_MSGR_TAG_READY)
2133 goto more;
2134 process_message(con);
2135 goto more;
2136 }
2137 if (con->in_tag == CEPH_MSGR_TAG_ACK) {
2138 ret = read_partial_ack(con);
2139 if (ret <= 0)
2140 goto out;
2141 process_ack(con);
2142 goto more;
2143 }
2144
2145 out:
2146 dout("try_read done on %p ret %d\n", con, ret);
2147 return ret;
2148
2149 bad_tag:
2150 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
2151 con->error_msg = "protocol error, garbage tag";
2152 ret = -1;
2153 goto out;
2154 }
2155
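/*
 * Illustrative note, not from the original file: once the handshake is
 * done, the loop above is driven by single tag bytes on the wire:
 *
 *      TAG_MSG   header, front/middle/data payload, then footer
 *      TAG_ACK   64-bit seq of the last message the peer has received
 *      TAG_CLOSE peer is closing; the connection is marked CLOSED
 *
 * Anything else lands in bad_tag and faults the connection.
 */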
2156
2157 /*
2158 * Atomically queue work on a connection. Bump @con reference to
2159 * avoid races with connection teardown.
2160 */
2161 static void queue_con(struct ceph_connection *con)
2162 {
2163 if (!con->ops->get(con)) {
2164 dout("queue_con %p ref count 0\n", con);
2165 return;
2166 }
2167
2168 if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
2169 dout("queue_con %p - already queued\n", con);
2170 con->ops->put(con);
2171 } else {
2172 dout("queue_con %p\n", con);
2173 }
2174 }
2175
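/*
 * Illustrative sketch, not from the original file: a typical
 * ->get/->put pair backing the reference dance above, assuming a
 * hypothetical owner structure (example_client) that embeds the
 * connection and an atomic reference count.
 */
struct example_client {
        atomic_t ref;
        struct ceph_connection con;
};

static struct ceph_connection *example_con_get(struct ceph_connection *con)
{
        struct example_client *cl =
                container_of(con, struct example_client, con);

        /* only take a reference if the owner is still alive */
        return atomic_inc_not_zero(&cl->ref) ? con : NULL;
}

static void example_con_put(struct ceph_connection *con)
{
        struct example_client *cl =
                container_of(con, struct example_client, con);

        if (atomic_dec_and_test(&cl->ref))
                kfree(cl);      /* last ref; real owners quiesce work first */
}
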
2176 /*
2177 * Do some work on a connection. Drop a connection ref when we're done.
2178 */
2179 static void con_work(struct work_struct *work)
2180 {
2181 struct ceph_connection *con = container_of(work, struct ceph_connection,
2182 work.work);
2183 int ret;
2184
2185 mutex_lock(&con->mutex);
2186 restart:
2187 if (test_and_clear_bit(BACKOFF, &con->flags)) {
2188 dout("con_work %p backing off\n", con);
2189 if (queue_delayed_work(ceph_msgr_wq, &con->work,
2190 round_jiffies_relative(con->delay))) {
2191 dout("con_work %p backoff %lu\n", con, con->delay);
2192 mutex_unlock(&con->mutex);
2193 return;
2194 } else {
2195 con->ops->put(con);
2196 dout("con_work %p FAILED to back off %lu\n", con,
2197 con->delay);
2198 }
2199 }
2200
2201 if (test_bit(STANDBY, &con->state)) {
2202 dout("con_work %p STANDBY\n", con);
2203 goto done;
2204 }
2205 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
2206 dout("con_work CLOSED\n");
2207 con_close_socket(con);
2208 goto done;
2209 }
2210 if (test_and_clear_bit(OPENING, &con->state)) {
2211 /* reopen w/ new peer */
2212 dout("con_work OPENING\n");
2213 con_close_socket(con);
2214 }
2215
2216 if (test_and_clear_bit(SOCK_CLOSED, &con->flags))
2217 goto fault;
2218
2219 ret = try_read(con);
2220 if (ret == -EAGAIN)
2221 goto restart;
2222 if (ret < 0)
2223 goto fault;
2224
2225 ret = try_write(con);
2226 if (ret == -EAGAIN)
2227 goto restart;
2228 if (ret < 0)
2229 goto fault;
2230
2231 done:
2232 mutex_unlock(&con->mutex);
2233 done_unlocked:
2234 con->ops->put(con);
2235 return;
2236
2237 fault:
2238 mutex_unlock(&con->mutex);
2239 ceph_fault(con); /* error/fault path */
2240 goto done_unlocked;
2241 }
2242
2243
2244 /*
2245 * Generic error/fault handler. A retry mechanism is used with
2246  * exponential backoff.
2247 */
2248 static void ceph_fault(struct ceph_connection *con)
2249 {
2250 pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2251 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2252 dout("fault %p state %lu to peer %s\n",
2253 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
2254
2255 if (test_bit(LOSSYTX, &con->flags)) {
2256 dout("fault on LOSSYTX channel\n");
2257 goto out;
2258 }
2259
2260 mutex_lock(&con->mutex);
2261 if (test_bit(CLOSED, &con->state))
2262 goto out_unlock;
2263
2264 con_close_socket(con);
2265
2266 if (con->in_msg) {
2267 ceph_msg_put(con->in_msg);
2268 con->in_msg = NULL;
2269 }
2270
2271 /* Requeue anything that hasn't been acked */
2272 list_splice_init(&con->out_sent, &con->out_queue);
2273
2274 /* If there are no messages queued or keepalive pending, place
2275  * the connection in a STANDBY state. */
2276 if (list_empty(&con->out_queue) &&
2277 !test_bit(KEEPALIVE_PENDING, &con->flags)) {
2278 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
2279 clear_bit(WRITE_PENDING, &con->flags);
2280 set_bit(STANDBY, &con->state);
2281 } else {
2282 /* retry after a delay. */
2283 if (con->delay == 0)
2284 con->delay = BASE_DELAY_INTERVAL;
2285 else if (con->delay < MAX_DELAY_INTERVAL)
2286 con->delay *= 2;
2287 con->ops->get(con);
2288 if (queue_delayed_work(ceph_msgr_wq, &con->work,
2289 round_jiffies_relative(con->delay))) {
2290 dout("fault queued %p delay %lu\n", con, con->delay);
2291 } else {
2292 con->ops->put(con);
2293 dout("fault failed to queue %p delay %lu, backoff\n",
2294 con, con->delay);
2295 /*
2296 * In many cases we see a socket state change
2297 * while con_work is running and end up
2298 * queuing (non-delayed) work, such that we
2299 * can't backoff with a delay. Set a flag so
2300 * that when con_work restarts we schedule the
2301 * delay then.
2302 */
2303 set_bit(BACKOFF, &con->flags);
2304 }
2305 }
2306
2307 out_unlock:
2308 mutex_unlock(&con->mutex);
2309 out:
2310 /*
2311 * in case we faulted due to authentication, invalidate our
2312 * current tickets so that we can get new ones.
2313 */
2314 if (con->auth_retry && con->ops->invalidate_authorizer) {
2315 dout("calling invalidate_authorizer()\n");
2316 con->ops->invalidate_authorizer(con);
2317 }
2318
2319 if (con->ops->fault)
2320 con->ops->fault(con);
2321 }
2322
2323
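/*
 * Illustrative sketch, not from the original file: the delay chosen
 * above doubles on each successive fault while still below
 * MAX_DELAY_INTERVAL.  next_fault_delay is a hypothetical helper
 * expressing the same progression.
 */
static unsigned long next_fault_delay(unsigned long delay)
{
        if (delay == 0)
                return BASE_DELAY_INTERVAL;
        if (delay < MAX_DELAY_INTERVAL)
                return delay * 2;       /* BASE, 2x, 4x, ... */
        return delay;
}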
2324
2325 /*
2326 * initialize a new messenger instance
2327 */
2328 void ceph_messenger_init(struct ceph_messenger *msgr,
2329 struct ceph_entity_addr *myaddr,
2330 u32 supported_features,
2331 u32 required_features,
2332 bool nocrc)
2333 {
2334 msgr->supported_features = supported_features;
2335 msgr->required_features = required_features;
2336
2337 spin_lock_init(&msgr->global_seq_lock);
2338
2339 if (myaddr)
2340 msgr->inst.addr = *myaddr;
2341
2342 /* select a random nonce */
2343 msgr->inst.addr.type = 0;
2344 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
2345 encode_my_addr(msgr);
2346 msgr->nocrc = nocrc;
2347
2348 dout("%s %p\n", __func__, msgr);
2349 }
2350 EXPORT_SYMBOL(ceph_messenger_init);
2351
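/*
 * Illustrative sketch, not from the original file: typical setup of an
 * embedded messenger.  The zero feature masks are placeholders; real
 * callers pass their supported/required feature bits.
 * example_client_setup is a hypothetical name.
 */
static void example_client_setup(struct ceph_messenger *msgr,
                                 struct ceph_entity_addr *myaddr)
{
        /* myaddr may be NULL; the nonce is randomized either way */
        ceph_messenger_init(msgr, myaddr, 0, 0, false);
}
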
2352 static void clear_standby(struct ceph_connection *con)
2353 {
2354 /* come back from STANDBY? */
2355 if (test_and_clear_bit(STANDBY, &con->state)) {
2356 mutex_lock(&con->mutex);
2357 dout("clear_standby %p and ++connect_seq\n", con);
2358 con->connect_seq++;
2359 WARN_ON(test_bit(WRITE_PENDING, &con->flags));
2360 WARN_ON(test_bit(KEEPALIVE_PENDING, &con->flags));
2361 mutex_unlock(&con->mutex);
2362 }
2363 }
2364
2365 /*
2366 * Queue up an outgoing message on the given connection.
2367 */
2368 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
2369 {
2370 if (test_bit(CLOSED, &con->state)) {
2371 dout("con_send %p closed, dropping %p\n", con, msg);
2372 ceph_msg_put(msg);
2373 return;
2374 }
2375
2376 /* set src+dst */
2377 msg->hdr.src = con->msgr->inst.name;
2378
2379 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
2380
2381 msg->needs_out_seq = true;
2382
2383 /* queue */
2384 mutex_lock(&con->mutex);
2385 BUG_ON(!list_empty(&msg->list_head));
2386 list_add_tail(&msg->list_head, &con->out_queue);
2387 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
2388 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
2389 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2390 le32_to_cpu(msg->hdr.front_len),
2391 le32_to_cpu(msg->hdr.middle_len),
2392 le32_to_cpu(msg->hdr.data_len));
2393 mutex_unlock(&con->mutex);
2394
2395 /* if there wasn't anything waiting to send before, queue
2396 * new work */
2397 clear_standby(con);
2398 if (test_and_set_bit(WRITE_PENDING, &con->flags) == 0)
2399 queue_con(con);
2400 }
2401 EXPORT_SYMBOL(ceph_con_send);
2402
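/*
 * Illustrative sketch, not from the original file: queueing a message.
 * CEPH_MSG_PING is just a placeholder type; ceph_con_send() consumes
 * the caller's reference whether the message is queued or dropped.
 */
static int example_send_ping(struct ceph_connection *con)
{
        struct ceph_msg *msg;

        msg = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS, true);
        if (!msg)
                return -ENOMEM;
        ceph_con_send(con, msg);
        return 0;
}
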
2403 /*
2404 * Revoke a message that was previously queued for send
2405 */
2406 void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
2407 {
2408 mutex_lock(&con->mutex);
2409 if (!list_empty(&msg->list_head)) {
2410 dout("con_revoke %p msg %p - was on queue\n", con, msg);
2411 list_del_init(&msg->list_head);
2412 ceph_msg_put(msg);
2413 msg->hdr.seq = 0;
2414 }
2415 if (con->out_msg == msg) {
2416 dout("con_revoke %p msg %p - was sending\n", con, msg);
2417 con->out_msg = NULL;
2418 if (con->out_kvec_is_msg) {
2419 con->out_skip = con->out_kvec_bytes;
2420 con->out_kvec_is_msg = false;
2421 }
2422 ceph_msg_put(msg);
2423 msg->hdr.seq = 0;
2424 }
2425 mutex_unlock(&con->mutex);
2426 }
2427
2428 /*
2429 * Revoke a message that we may be reading data into
2430 */
2431 void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
2432 {
2433 mutex_lock(&con->mutex);
2434 if (con->in_msg && con->in_msg == msg) {
2435 unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
2436 unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
2437 unsigned data_len = le32_to_cpu(con->in_hdr.data_len);
2438
2439 /* skip rest of message */
2440 dout("con_revoke_pages %p msg %p revoked\n", con, msg);
2441 con->in_base_pos = con->in_base_pos -
2442 sizeof(struct ceph_msg_header) -
2443 front_len -
2444 middle_len -
2445 data_len -
2446 sizeof(struct ceph_msg_footer);
2447 ceph_msg_put(con->in_msg);
2448 con->in_msg = NULL;
2449 con->in_tag = CEPH_MSGR_TAG_READY;
2450 con->in_seq++;
2451 } else {
2452 dout("con_revoke_pages %p msg %p pages %p no-op\n",
2453 con, con->in_msg, msg);
2454 }
2455 mutex_unlock(&con->mutex);
2456 }
2457
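/*
 * Illustrative note, not from the original file: in_base_pos counts
 * the bytes of the current message consumed so far (header plus any
 * payload already read).  Subtracting the full on-wire size -- header,
 * front, middle, data and footer -- leaves a negative value whose
 * magnitude is exactly the number of bytes still to arrive; try_read()
 * then discards that many bytes through the skip buffer before
 * looking for the next tag.
 */
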
2458 /*
2459  * Queue a keepalive byte to ensure the TCP connection is alive.
2460 */
2461 void ceph_con_keepalive(struct ceph_connection *con)
2462 {
2463 dout("con_keepalive %p\n", con);
2464 clear_standby(con);
2465 if (test_and_set_bit(KEEPALIVE_PENDING, &con->flags) == 0 &&
2466 test_and_set_bit(WRITE_PENDING, &con->flags) == 0)
2467 queue_con(con);
2468 }
2469 EXPORT_SYMBOL(ceph_con_keepalive);
2470
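/*
 * Illustrative sketch, not from the original file: keeping an idle
 * connection alive from a periodic work item.  The example_peer
 * structure, its setup, and the 10 second interval are hypothetical.
 */
struct example_peer {
        struct ceph_connection con;
        struct delayed_work tick;
};

static void example_keepalive_tick(struct work_struct *work)
{
        struct example_peer *p =
                container_of(work, struct example_peer, tick.work);

        ceph_con_keepalive(&p->con);
        schedule_delayed_work(&p->tick, round_jiffies_relative(10 * HZ));
}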
2471
2472 /*
2473  * Construct a new message with the given type and size.
2474  * The new message has a ref count of 1.
2475 */
2476 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
2477 bool can_fail)
2478 {
2479 struct ceph_msg *m;
2480
2481 m = kmalloc(sizeof(*m), flags);
2482 if (m == NULL)
2483 goto out;
2484 kref_init(&m->kref);
2485 INIT_LIST_HEAD(&m->list_head);
2486
2487 m->hdr.tid = 0;
2488 m->hdr.type = cpu_to_le16(type);
2489 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
2490 m->hdr.version = 0;
2491 m->hdr.front_len = cpu_to_le32(front_len);
2492 m->hdr.middle_len = 0;
2493 m->hdr.data_len = 0;
2494 m->hdr.data_off = 0;
2495 m->hdr.reserved = 0;
2496 m->footer.front_crc = 0;
2497 m->footer.middle_crc = 0;
2498 m->footer.data_crc = 0;
2499 m->footer.flags = 0;
2500 m->front_max = front_len;
2501 m->front_is_vmalloc = false;
2502 m->more_to_follow = false;
2503 m->ack_stamp = 0;
2504 m->pool = NULL;
2505
2506 /* middle */
2507 m->middle = NULL;
2508
2509 /* data */
2510 m->nr_pages = 0;
2511 m->page_alignment = 0;
2512 m->pages = NULL;
2513 m->pagelist = NULL;
2514 m->bio = NULL;
2515 m->bio_iter = NULL;
2516 m->bio_seg = 0;
2517 m->trail = NULL;
2518
2519 /* front */
2520 if (front_len) {
2521 if (front_len > PAGE_CACHE_SIZE) {
2522 m->front.iov_base = __vmalloc(front_len, flags,
2523 PAGE_KERNEL);
2524 m->front_is_vmalloc = true;
2525 } else {
2526 m->front.iov_base = kmalloc(front_len, flags);
2527 }
2528 if (m->front.iov_base == NULL) {
2529 dout("ceph_msg_new can't allocate %d bytes\n",
2530 front_len);
2531 goto out2;
2532 }
2533 } else {
2534 m->front.iov_base = NULL;
2535 }
2536 m->front.iov_len = front_len;
2537
2538 dout("ceph_msg_new %p front %d\n", m, front_len);
2539 return m;
2540
2541 out2:
2542 ceph_msg_put(m);
2543 out:
2544 if (!can_fail) {
2545 pr_err("msg_new can't create type %d front %d\n", type,
2546 front_len);
2547 WARN_ON(1);
2548 } else {
2549 dout("msg_new can't create type %d front %d\n", type,
2550 front_len);
2551 }
2552 return NULL;
2553 }
2554 EXPORT_SYMBOL(ceph_msg_new);
2555
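/*
 * Illustrative sketch, not from the original file: filling the front
 * payload of a freshly allocated message.  With can_fail = true the
 * caller must handle a NULL return itself, as here; the PING type and
 * 32-bit payload are placeholders.
 */
static struct ceph_msg *example_msg_with_payload(u32 value)
{
        struct ceph_msg *m;

        m = ceph_msg_new(CEPH_MSG_PING, sizeof(__le32), GFP_NOFS, true);
        if (!m)
                return NULL;
        *(__le32 *)m->front.iov_base = cpu_to_le32(value);
        return m;
}
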
2556 /*
2557 * Allocate "middle" portion of a message, if it is needed and wasn't
2558 * allocated by alloc_msg. This allows us to read a small fixed-size
2559 * per-type header in the front and then gracefully fail (i.e.,
2560 * propagate the error to the caller based on info in the front) when
2561 * the middle is too large.
2562 */
2563 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
2564 {
2565 int type = le16_to_cpu(msg->hdr.type);
2566 int middle_len = le32_to_cpu(msg->hdr.middle_len);
2567
2568 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
2569 ceph_msg_type_name(type), middle_len);
2570 BUG_ON(!middle_len);
2571 BUG_ON(msg->middle);
2572
2573 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
2574 if (!msg->middle)
2575 return -ENOMEM;
2576 return 0;
2577 }
2578
2579 /*
2580 * Generic message allocator, for incoming messages.
2581 */
2582 static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
2583 struct ceph_msg_header *hdr,
2584 int *skip)
2585 {
2586 int type = le16_to_cpu(hdr->type);
2587 int front_len = le32_to_cpu(hdr->front_len);
2588 int middle_len = le32_to_cpu(hdr->middle_len);
2589 struct ceph_msg *msg = NULL;
2590 int ret;
2591
2592 if (con->ops->alloc_msg) {
2593 mutex_unlock(&con->mutex);
2594 msg = con->ops->alloc_msg(con, hdr, skip);
2595 mutex_lock(&con->mutex);
2596 if (!msg || *skip)
2597 return NULL;
2598 }
2599 if (!msg) {
2600 *skip = 0;
2601 msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
2602 if (!msg) {
2603 pr_err("unable to allocate msg type %d len %d\n",
2604 type, front_len);
2605 return NULL;
2606 }
2607 msg->page_alignment = le16_to_cpu(hdr->data_off);
2608 }
2609 memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
2610
2611 if (middle_len && !msg->middle) {
2612 ret = ceph_alloc_middle(con, msg);
2613 if (ret < 0) {
2614 ceph_msg_put(msg);
2615 return NULL;
2616 }
2617 }
2618
2619 return msg;
2620 }
2621
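/*
 * Illustrative sketch, not from the original file: a ->alloc_msg
 * connection op.  Returning NULL with *skip set tells the read path
 * to discard the incoming message rather than treat the allocation as
 * a failure.  example_alloc_msg and its type check are hypothetical.
 */
static struct ceph_msg *example_alloc_msg(struct ceph_connection *con,
                                          struct ceph_msg_header *hdr,
                                          int *skip)
{
        int type = le16_to_cpu(hdr->type);
        int front_len = le32_to_cpu(hdr->front_len);

        if (type != CEPH_MSG_PING) {
                *skip = 1;              /* unexpected type: drop it */
                return NULL;
        }
        *skip = 0;
        return ceph_msg_new(type, front_len, GFP_NOFS, false);
}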
2622
2623 /*
2624 * Free a generically kmalloc'd message.
2625 */
2626 void ceph_msg_kfree(struct ceph_msg *m)
2627 {
2628 dout("msg_kfree %p\n", m);
2629 if (m->front_is_vmalloc)
2630 vfree(m->front.iov_base);
2631 else
2632 kfree(m->front.iov_base);
2633 kfree(m);
2634 }
2635
2636 /*
2637 * Drop a msg ref. Destroy as needed.
2638 */
2639 void ceph_msg_last_put(struct kref *kref)
2640 {
2641 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
2642
2643 dout("ceph_msg_put last one on %p\n", m);
2644 WARN_ON(!list_empty(&m->list_head));
2645
2646 /* drop middle, data, if any */
2647 if (m->middle) {
2648 ceph_buffer_put(m->middle);
2649 m->middle = NULL;
2650 }
2651 m->nr_pages = 0;
2652 m->pages = NULL;
2653
2654 if (m->pagelist) {
2655 ceph_pagelist_release(m->pagelist);
2656 kfree(m->pagelist);
2657 m->pagelist = NULL;
2658 }
2659
2660 m->trail = NULL;
2661
2662 if (m->pool)
2663 ceph_msgpool_put(m->pool, m);
2664 else
2665 ceph_msg_kfree(m);
2666 }
2667 EXPORT_SYMBOL(ceph_msg_last_put);
2668
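/*
 * Illustrative note, not from the original file: callers normally go
 * through ceph_msg_put(), a kref_put(&msg->kref, ceph_msg_last_put)
 * wrapper in the messenger header, so the release above runs only on
 * the final reference drop.
 */
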
2669 void ceph_msg_dump(struct ceph_msg *msg)
2670 {
2671 pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
2672 msg->front_max, msg->nr_pages);
2673 print_hex_dump(KERN_DEBUG, "header: ",
2674 DUMP_PREFIX_OFFSET, 16, 1,
2675 &msg->hdr, sizeof(msg->hdr), true);
2676 print_hex_dump(KERN_DEBUG, " front: ",
2677 DUMP_PREFIX_OFFSET, 16, 1,
2678 msg->front.iov_base, msg->front.iov_len, true);
2679 if (msg->middle)
2680 print_hex_dump(KERN_DEBUG, "middle: ",
2681 DUMP_PREFIX_OFFSET, 16, 1,
2682 msg->middle->vec.iov_base,
2683 msg->middle->vec.iov_len, true);
2684 print_hex_dump(KERN_DEBUG, "footer: ",
2685 DUMP_PREFIX_OFFSET, 16, 1,
2686 &msg->footer, sizeof(msg->footer), true);
2687 }
2688 EXPORT_SYMBOL(ceph_msg_dump);