libceph: handle connection reopen race with callbacks
net/ceph/messenger.c
1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/crc32c.h>
4 #include <linux/ctype.h>
5 #include <linux/highmem.h>
6 #include <linux/inet.h>
7 #include <linux/kthread.h>
8 #include <linux/net.h>
9 #include <linux/slab.h>
10 #include <linux/socket.h>
11 #include <linux/string.h>
12 #include <linux/bio.h>
13 #include <linux/blkdev.h>
14 #include <net/tcp.h>
15
16 #include <linux/ceph/libceph.h>
17 #include <linux/ceph/messenger.h>
18 #include <linux/ceph/decode.h>
19 #include <linux/ceph/pagelist.h>
20
21 /*
22 * Ceph uses the messenger to exchange ceph_msg messages with other
23 * hosts in the system. The messenger provides ordered and reliable
24 * delivery. We tolerate TCP disconnects by reconnecting (with
25 * exponential backoff) in the case of a fault (disconnection, bad
26 * crc, protocol error). Acks allow sent messages to be discarded by
27 * the sender.
28 */
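
/*
 * Editor's note: a minimal usage sketch, not part of the original file.
 * "my_con_ops", "supported", "required" and "peer_addr" stand in for
 * caller-supplied values; the ceph_connection_operations callbacks
 * (get/put/dispatch/fault/...) are declared in linux/ceph/messenger.h.
 * ceph_con_init() zeroes the connection, so the ops pointer is assigned
 * afterwards, and ceph_con_open() only queues work -- the connect itself
 * happens asynchronously in con_work().
 *
 *	ceph_msgr_init();
 *	msgr = ceph_messenger_create(NULL, supported, required);
 *	ceph_con_init(msgr, con);
 *	con->ops = &my_con_ops;
 *	ceph_con_open(con, &peer_addr);
 *	...
 *	ceph_con_close(con);
 *	ceph_messenger_destroy(msgr);
 *	ceph_msgr_exit();
 */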
29
30 /* static tag bytes (protocol control messages) */
31 static char tag_msg = CEPH_MSGR_TAG_MSG;
32 static char tag_ack = CEPH_MSGR_TAG_ACK;
33 static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
34
35 #ifdef CONFIG_LOCKDEP
36 static struct lock_class_key socket_class;
37 #endif
38
39
40 static void queue_con(struct ceph_connection *con);
41 static void con_work(struct work_struct *);
42 static void ceph_fault(struct ceph_connection *con);
43
44 /*
45 * nicely render a sockaddr as a string.
46 */
47 #define MAX_ADDR_STR 20
48 #define MAX_ADDR_STR_LEN 60
49 static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
50 static DEFINE_SPINLOCK(addr_str_lock);
51 static int last_addr_str;
52
53 const char *ceph_pr_addr(const struct sockaddr_storage *ss)
54 {
55 int i;
56 char *s;
57 struct sockaddr_in *in4 = (void *)ss;
58 struct sockaddr_in6 *in6 = (void *)ss;
59
60 spin_lock(&addr_str_lock);
61 i = last_addr_str++;
62 if (last_addr_str == MAX_ADDR_STR)
63 last_addr_str = 0;
64 spin_unlock(&addr_str_lock);
65 s = addr_str[i];
66
67 switch (ss->ss_family) {
68 case AF_INET:
69 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%u", &in4->sin_addr,
70 (unsigned int)ntohs(in4->sin_port));
71 break;
72
73 case AF_INET6:
74 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%u", &in6->sin6_addr,
75 (unsigned int)ntohs(in6->sin6_port));
76 break;
77
78 default:
79 sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
80 }
81
82 return s;
83 }
84 EXPORT_SYMBOL(ceph_pr_addr);
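
/*
 * Editor's note (illustration, not in the original file): the returned
 * string lives in a small ring of static buffers, so it is safe to use
 * directly in a dout/printk argument list but must not be cached, e.g.
 *
 *	dout("connecting to %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 */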
85
86 static void encode_my_addr(struct ceph_messenger *msgr)
87 {
88 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
89 ceph_encode_addr(&msgr->my_enc_addr);
90 }
91
92 /*
93 * work queue for all reading and writing to/from the socket.
94 */
95 struct workqueue_struct *ceph_msgr_wq;
96
97 int ceph_msgr_init(void)
98 {
99 ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
100 if (!ceph_msgr_wq) {
101 pr_err("msgr_init failed to create workqueue\n");
102 return -ENOMEM;
103 }
104 return 0;
105 }
106 EXPORT_SYMBOL(ceph_msgr_init);
107
108 void ceph_msgr_exit(void)
109 {
110 destroy_workqueue(ceph_msgr_wq);
111 }
112 EXPORT_SYMBOL(ceph_msgr_exit);
113
114 void ceph_msgr_flush(void)
115 {
116 flush_workqueue(ceph_msgr_wq);
117 }
118 EXPORT_SYMBOL(ceph_msgr_flush);
119
120
121 /*
122 * socket callback functions
123 */
124
125 /* data available on socket, or listen socket received a connect */
126 static void ceph_data_ready(struct sock *sk, int count_unused)
127 {
128 struct ceph_connection *con =
129 (struct ceph_connection *)sk->sk_user_data;
130 if (sk->sk_state != TCP_CLOSE_WAIT) {
131 dout("ceph_data_ready on %p state = %lu, queueing work\n",
132 con, con->state);
133 queue_con(con);
134 }
135 }
136
137 /* socket has buffer space for writing */
138 static void ceph_write_space(struct sock *sk)
139 {
140 struct ceph_connection *con =
141 (struct ceph_connection *)sk->sk_user_data;
142
143 /* only queue to workqueue if there is data we want to write. */
144 if (test_bit(WRITE_PENDING, &con->state)) {
145 dout("ceph_write_space %p queueing write work\n", con);
146 queue_con(con);
147 } else {
148 dout("ceph_write_space %p nothing to write\n", con);
149 }
150
151 /* since we have our own write_space, clear the SOCK_NOSPACE flag */
152 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
153 }
154
155 /* socket's state has changed */
156 static void ceph_state_change(struct sock *sk)
157 {
158 struct ceph_connection *con =
159 (struct ceph_connection *)sk->sk_user_data;
160
161 dout("ceph_state_change %p state = %lu sk_state = %u\n",
162 con, con->state, sk->sk_state);
163
164 if (test_bit(CLOSED, &con->state))
165 return;
166
167 switch (sk->sk_state) {
168 case TCP_CLOSE:
169 dout("ceph_state_change TCP_CLOSE\n");
170 case TCP_CLOSE_WAIT:
171 dout("ceph_state_change TCP_CLOSE_WAIT\n");
172 if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
173 if (test_bit(CONNECTING, &con->state))
174 con->error_msg = "connection failed";
175 else
176 con->error_msg = "socket closed";
177 queue_con(con);
178 }
179 break;
180 case TCP_ESTABLISHED:
181 dout("ceph_state_change TCP_ESTABLISHED\n");
182 queue_con(con);
183 break;
184 }
185 }
186
187 /*
188 * set up socket callbacks
189 */
190 static void set_sock_callbacks(struct socket *sock,
191 struct ceph_connection *con)
192 {
193 struct sock *sk = sock->sk;
194 sk->sk_user_data = (void *)con;
195 sk->sk_data_ready = ceph_data_ready;
196 sk->sk_write_space = ceph_write_space;
197 sk->sk_state_change = ceph_state_change;
198 }
199
200
201 /*
202 * socket helpers
203 */
204
205 /*
206 * initiate connection to a remote socket.
207 */
208 static struct socket *ceph_tcp_connect(struct ceph_connection *con)
209 {
210 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
211 struct socket *sock;
212 int ret;
213
214 BUG_ON(con->sock);
215 ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
216 IPPROTO_TCP, &sock);
217 if (ret)
218 return ERR_PTR(ret);
219 con->sock = sock;
220 sock->sk->sk_allocation = GFP_NOFS;
221
222 #ifdef CONFIG_LOCKDEP
223 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
224 #endif
225
226 set_sock_callbacks(sock, con);
227
228 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
229
230 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
231 O_NONBLOCK);
232 if (ret == -EINPROGRESS) {
233 dout("connect %s EINPROGRESS sk_state = %u\n",
234 ceph_pr_addr(&con->peer_addr.in_addr),
235 sock->sk->sk_state);
236 ret = 0;
237 }
238 if (ret < 0) {
239 pr_err("connect %s error %d\n",
240 ceph_pr_addr(&con->peer_addr.in_addr), ret);
241 sock_release(sock);
242 con->sock = NULL;
243 con->error_msg = "connect error";
244 }
245
246 if (ret < 0)
247 return ERR_PTR(ret);
248 return sock;
249 }
250
251 static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
252 {
253 struct kvec iov = {buf, len};
254 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
255 int r;
256
257 r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
258 if (r == -EAGAIN)
259 r = 0;
260 return r;
261 }
262
263 /*
264 * write something. @more is true if caller will be sending more data
265 * shortly.
266 */
267 static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
268 size_t kvlen, size_t len, int more)
269 {
270 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
271 int r;
272
273 if (more)
274 msg.msg_flags |= MSG_MORE;
275 else
276 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
277
278 r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
279 if (r == -EAGAIN)
280 r = 0;
281 return r;
282 }
283
284
285 /*
286 * Shutdown/close the socket for the given connection.
287 */
288 static int con_close_socket(struct ceph_connection *con)
289 {
290 int rc;
291
292 dout("con_close_socket on %p sock %p\n", con, con->sock);
293 if (!con->sock)
294 return 0;
295 set_bit(SOCK_CLOSED, &con->state);
296 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
297 sock_release(con->sock);
298 con->sock = NULL;
299 clear_bit(SOCK_CLOSED, &con->state);
300 return rc;
301 }
302
303 /*
304 * Reset a connection. Discard all incoming and outgoing messages
305 * and clear *_seq state.
306 */
307 static void ceph_msg_remove(struct ceph_msg *msg)
308 {
309 list_del_init(&msg->list_head);
310 ceph_msg_put(msg);
311 }
312 static void ceph_msg_remove_list(struct list_head *head)
313 {
314 while (!list_empty(head)) {
315 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
316 list_head);
317 ceph_msg_remove(msg);
318 }
319 }
320
321 static void reset_connection(struct ceph_connection *con)
322 {
323 /* reset connection, out_queue, msg_ and connect_seq */
324 /* discard existing out_queue and msg_seq */
325 ceph_msg_remove_list(&con->out_queue);
326 ceph_msg_remove_list(&con->out_sent);
327
328 if (con->in_msg) {
329 ceph_msg_put(con->in_msg);
330 con->in_msg = NULL;
331 }
332
333 con->connect_seq = 0;
334 con->out_seq = 0;
335 if (con->out_msg) {
336 ceph_msg_put(con->out_msg);
337 con->out_msg = NULL;
338 }
339 con->in_seq = 0;
340 con->in_seq_acked = 0;
341 }
342
343 /*
344 * mark a peer down. drop any open connections.
345 */
346 void ceph_con_close(struct ceph_connection *con)
347 {
348 dout("con_close %p peer %s\n", con,
349 ceph_pr_addr(&con->peer_addr.in_addr));
350 set_bit(CLOSED, &con->state); /* in case there's queued work */
351 clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */
352 clear_bit(LOSSYTX, &con->state); /* so we retry next connect */
353 clear_bit(KEEPALIVE_PENDING, &con->state);
354 clear_bit(WRITE_PENDING, &con->state);
355 mutex_lock(&con->mutex);
356 reset_connection(con);
357 con->peer_global_seq = 0;
358 cancel_delayed_work(&con->work);
359 mutex_unlock(&con->mutex);
360 queue_con(con);
361 }
362 EXPORT_SYMBOL(ceph_con_close);
363
364 /*
365 * Reopen a closed connection, with a new peer address.
366 */
367 void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
368 {
369 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
370 set_bit(OPENING, &con->state);
371 clear_bit(CLOSED, &con->state);
372 memcpy(&con->peer_addr, addr, sizeof(*addr));
373 con->delay = 0; /* reset backoff memory */
374 queue_con(con);
375 }
376 EXPORT_SYMBOL(ceph_con_open);
377
378 /*
379 * return true if this connection ever successfully opened
380 */
381 bool ceph_con_opened(struct ceph_connection *con)
382 {
383 return con->connect_seq > 0;
384 }
385
386 /*
387 * generic get/put
388 */
389 struct ceph_connection *ceph_con_get(struct ceph_connection *con)
390 {
391 dout("con_get %p nref = %d -> %d\n", con,
392 atomic_read(&con->nref), atomic_read(&con->nref) + 1);
393 if (atomic_inc_not_zero(&con->nref))
394 return con;
395 return NULL;
396 }
397
398 void ceph_con_put(struct ceph_connection *con)
399 {
400 dout("con_put %p nref = %d -> %d\n", con,
401 atomic_read(&con->nref), atomic_read(&con->nref) - 1);
402 BUG_ON(atomic_read(&con->nref) == 0);
403 if (atomic_dec_and_test(&con->nref)) {
404 BUG_ON(con->sock);
405 kfree(con);
406 }
407 }
408
409 /*
410 * initialize a new connection.
411 */
412 void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
413 {
414 dout("con_init %p\n", con);
415 memset(con, 0, sizeof(*con));
416 atomic_set(&con->nref, 1);
417 con->msgr = msgr;
418 mutex_init(&con->mutex);
419 INIT_LIST_HEAD(&con->out_queue);
420 INIT_LIST_HEAD(&con->out_sent);
421 INIT_DELAYED_WORK(&con->work, con_work);
422 }
423 EXPORT_SYMBOL(ceph_con_init);
424
425
426 /*
427 * We maintain a global counter to order connection attempts. Get
428 * a unique seq greater than @gt.
429 */
430 static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
431 {
432 u32 ret;
433
434 spin_lock(&msgr->global_seq_lock);
435 if (msgr->global_seq < gt)
436 msgr->global_seq = gt;
437 ret = ++msgr->global_seq;
438 spin_unlock(&msgr->global_seq_lock);
439 return ret;
440 }
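
/*
 * Editor's note (worked example, not in the original file): the counter
 * only moves forward.  After get_global_seq(msgr, 10) the returned value
 * is at least 11, and a later get_global_seq(msgr, 0) still returns a
 * strictly larger value; this is what lets a reconnect advertise a
 * global_seq greater than anything the peer has already seen from us.
 */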
441
442
443 /*
444 * Prepare footer for currently outgoing message, and finish things
445 * off. Assumes out_kvec* are already valid.. we just add on to the end.
446 */
447 static void prepare_write_message_footer(struct ceph_connection *con, int v)
448 {
449 struct ceph_msg *m = con->out_msg;
450
451 dout("prepare_write_message_footer %p\n", con);
452 con->out_kvec_is_msg = true;
453 con->out_kvec[v].iov_base = &m->footer;
454 con->out_kvec[v].iov_len = sizeof(m->footer);
455 con->out_kvec_bytes += sizeof(m->footer);
456 con->out_kvec_left++;
457 con->out_more = m->more_to_follow;
458 con->out_msg_done = true;
459 }
460
461 /*
462 * Prepare headers for the next outgoing message.
463 */
464 static void prepare_write_message(struct ceph_connection *con)
465 {
466 struct ceph_msg *m;
467 int v = 0;
468
469 con->out_kvec_bytes = 0;
470 con->out_kvec_is_msg = true;
471 con->out_msg_done = false;
472
473 /* Sneak an ack in there first? If we can get it into the same
474 * TCP packet that's a good thing. */
475 if (con->in_seq > con->in_seq_acked) {
476 con->in_seq_acked = con->in_seq;
477 con->out_kvec[v].iov_base = &tag_ack;
478 con->out_kvec[v++].iov_len = 1;
479 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
480 con->out_kvec[v].iov_base = &con->out_temp_ack;
481 con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
482 con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
483 }
484
485 m = list_first_entry(&con->out_queue,
486 struct ceph_msg, list_head);
487 con->out_msg = m;
488 if (test_bit(LOSSYTX, &con->state)) {
489 list_del_init(&m->list_head);
490 } else {
491 /* put message on sent list */
492 ceph_msg_get(m);
493 list_move_tail(&m->list_head, &con->out_sent);
494 }
495
496 /*
497 * only assign outgoing seq # if we haven't sent this message
498 * yet. if it is requeued, resend with its original seq.
499 */
500 if (m->needs_out_seq) {
501 m->hdr.seq = cpu_to_le64(++con->out_seq);
502 m->needs_out_seq = false;
503 }
504
505 dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
506 m, con->out_seq, le16_to_cpu(m->hdr.type),
507 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
508 le32_to_cpu(m->hdr.data_len),
509 m->nr_pages);
510 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
511
512 /* tag + hdr + front + middle */
513 con->out_kvec[v].iov_base = &tag_msg;
514 con->out_kvec[v++].iov_len = 1;
515 con->out_kvec[v].iov_base = &m->hdr;
516 con->out_kvec[v++].iov_len = sizeof(m->hdr);
517 con->out_kvec[v++] = m->front;
518 if (m->middle)
519 con->out_kvec[v++] = m->middle->vec;
520 con->out_kvec_left = v;
521 con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
522 (m->middle ? m->middle->vec.iov_len : 0);
523 con->out_kvec_cur = con->out_kvec;
524
525 /* fill in crc (except data pages), footer */
526 con->out_msg->hdr.crc =
527 cpu_to_le32(crc32c(0, (void *)&m->hdr,
528 sizeof(m->hdr) - sizeof(m->hdr.crc)));
529 con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
530 con->out_msg->footer.front_crc =
531 cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
532 if (m->middle)
533 con->out_msg->footer.middle_crc =
534 cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
535 m->middle->vec.iov_len));
536 else
537 con->out_msg->footer.middle_crc = 0;
538 con->out_msg->footer.data_crc = 0;
539 dout("prepare_write_message front_crc %u data_crc %u\n",
540 le32_to_cpu(con->out_msg->footer.front_crc),
541 le32_to_cpu(con->out_msg->footer.middle_crc));
542
543 /* is there a data payload? */
544 if (le32_to_cpu(m->hdr.data_len) > 0) {
545 /* initialize page iterator */
546 con->out_msg_pos.page = 0;
547 if (m->pages)
548 con->out_msg_pos.page_pos = m->page_alignment;
549 else
550 con->out_msg_pos.page_pos = 0;
551 con->out_msg_pos.data_pos = 0;
552 con->out_msg_pos.did_page_crc = 0;
553 con->out_more = 1; /* data + footer will follow */
554 } else {
555 /* no, queue up footer too and be done */
556 prepare_write_message_footer(con, v);
557 }
558
559 set_bit(WRITE_PENDING, &con->state);
560 }
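
/*
 * Editor's note (illustration, not in the original file): for a message
 * that has a middle section, with an incoming ack piggy-backed in front,
 * the kvec array assembled above ends up laid out as
 *
 *	out_kvec[0] = { &tag_ack,           1 }
 *	out_kvec[1] = { &con->out_temp_ack, sizeof(con->out_temp_ack) }
 *	out_kvec[2] = { &tag_msg,           1 }
 *	out_kvec[3] = { &m->hdr,            sizeof(m->hdr) }
 *	out_kvec[4] = m->front
 *	out_kvec[5] = m->middle->vec
 *
 * and the footer is appended by prepare_write_message_footer(), either
 * immediately (no data payload) or once the data pages have been sent.
 */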
561
562 /*
563 * Prepare an ack.
564 */
565 static void prepare_write_ack(struct ceph_connection *con)
566 {
567 dout("prepare_write_ack %p %llu -> %llu\n", con,
568 con->in_seq_acked, con->in_seq);
569 con->in_seq_acked = con->in_seq;
570
571 con->out_kvec[0].iov_base = &tag_ack;
572 con->out_kvec[0].iov_len = 1;
573 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
574 con->out_kvec[1].iov_base = &con->out_temp_ack;
575 con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
576 con->out_kvec_left = 2;
577 con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
578 con->out_kvec_cur = con->out_kvec;
579 con->out_more = 1; /* more will follow.. eventually.. */
580 set_bit(WRITE_PENDING, &con->state);
581 }
582
583 /*
584 * Prepare to write keepalive byte.
585 */
586 static void prepare_write_keepalive(struct ceph_connection *con)
587 {
588 dout("prepare_write_keepalive %p\n", con);
589 con->out_kvec[0].iov_base = &tag_keepalive;
590 con->out_kvec[0].iov_len = 1;
591 con->out_kvec_left = 1;
592 con->out_kvec_bytes = 1;
593 con->out_kvec_cur = con->out_kvec;
594 set_bit(WRITE_PENDING, &con->state);
595 }
596
597 /*
598 * Connection negotiation.
599 */
600
601 static int prepare_connect_authorizer(struct ceph_connection *con)
602 {
603 void *auth_buf;
604 int auth_len = 0;
605 int auth_protocol = 0;
606
607 mutex_unlock(&con->mutex);
608 if (con->ops->get_authorizer)
609 con->ops->get_authorizer(con, &auth_buf, &auth_len,
610 &auth_protocol, &con->auth_reply_buf,
611 &con->auth_reply_buf_len,
612 con->auth_retry);
613 mutex_lock(&con->mutex);
614
615 if (test_bit(CLOSED, &con->state) ||
616 test_bit(OPENING, &con->state))
617 return -EAGAIN;
618
619 con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
620 con->out_connect.authorizer_len = cpu_to_le32(auth_len);
621
622 con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
623 con->out_kvec[con->out_kvec_left].iov_len = auth_len;
624 con->out_kvec_left++;
625 con->out_kvec_bytes += auth_len;
626
627 return 0;
628 }
629
630 /*
631 * We connected to a peer and are saying hello.
632 */
633 static void prepare_write_banner(struct ceph_messenger *msgr,
634 struct ceph_connection *con)
635 {
636 int len = strlen(CEPH_BANNER);
637
638 con->out_kvec[0].iov_base = CEPH_BANNER;
639 con->out_kvec[0].iov_len = len;
640 con->out_kvec[1].iov_base = &msgr->my_enc_addr;
641 con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
642 con->out_kvec_left = 2;
643 con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
644 con->out_kvec_cur = con->out_kvec;
645 con->out_more = 0;
646 set_bit(WRITE_PENDING, &con->state);
647 }
648
649 static int prepare_write_connect(struct ceph_messenger *msgr,
650 struct ceph_connection *con,
651 int after_banner)
652 {
653 unsigned global_seq = get_global_seq(con->msgr, 0);
654 int proto;
655
656 switch (con->peer_name.type) {
657 case CEPH_ENTITY_TYPE_MON:
658 proto = CEPH_MONC_PROTOCOL;
659 break;
660 case CEPH_ENTITY_TYPE_OSD:
661 proto = CEPH_OSDC_PROTOCOL;
662 break;
663 case CEPH_ENTITY_TYPE_MDS:
664 proto = CEPH_MDSC_PROTOCOL;
665 break;
666 default:
667 BUG();
668 }
669
670 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
671 con->connect_seq, global_seq, proto);
672
673 con->out_connect.features = cpu_to_le64(msgr->supported_features);
674 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
675 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
676 con->out_connect.global_seq = cpu_to_le32(global_seq);
677 con->out_connect.protocol_version = cpu_to_le32(proto);
678 con->out_connect.flags = 0;
679
680 if (!after_banner) {
681 con->out_kvec_left = 0;
682 con->out_kvec_bytes = 0;
683 }
684 con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
685 con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
686 con->out_kvec_left++;
687 con->out_kvec_bytes += sizeof(con->out_connect);
688 con->out_kvec_cur = con->out_kvec;
689 con->out_more = 0;
690 set_bit(WRITE_PENDING, &con->state);
691
692 return prepare_connect_authorizer(con);
693 }
694
695
696 /*
697 * write as much of the pending kvec data to the socket as we can.
698 * 1 -> done
699 * 0 -> socket full, but more to do
700 * <0 -> error
701 */
702 static int write_partial_kvec(struct ceph_connection *con)
703 {
704 int ret;
705
706 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
707 while (con->out_kvec_bytes > 0) {
708 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
709 con->out_kvec_left, con->out_kvec_bytes,
710 con->out_more);
711 if (ret <= 0)
712 goto out;
713 con->out_kvec_bytes -= ret;
714 if (con->out_kvec_bytes == 0)
715 break; /* done */
716 while (ret > 0) {
717 if (ret >= con->out_kvec_cur->iov_len) {
718 ret -= con->out_kvec_cur->iov_len;
719 con->out_kvec_cur++;
720 con->out_kvec_left--;
721 } else {
722 con->out_kvec_cur->iov_len -= ret;
723 con->out_kvec_cur->iov_base += ret;
724 ret = 0;
725 break;
726 }
727 }
728 }
729 con->out_kvec_left = 0;
730 con->out_kvec_is_msg = false;
731 ret = 1;
732 out:
733 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
734 con->out_kvec_bytes, con->out_kvec_left, ret);
735 return ret; /* done! */
736 }
737
738 #ifdef CONFIG_BLOCK
739 static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
740 {
741 if (!bio) {
742 *iter = NULL;
743 *seg = 0;
744 return;
745 }
746 *iter = bio;
747 *seg = bio->bi_idx;
748 }
749
750 static void iter_bio_next(struct bio **bio_iter, int *seg)
751 {
752 if (*bio_iter == NULL)
753 return;
754
755 BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
756
757 (*seg)++;
758 if (*seg == (*bio_iter)->bi_vcnt)
759 init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
760 }
761 #endif
762
763 /*
764 * Write as much message data payload as we can. If we finish, queue
765 * up the footer.
766 * 1 -> done, footer is now queued in out_kvec[].
767 * 0 -> socket full, but more to do
768 * <0 -> error
769 */
770 static int write_partial_msg_pages(struct ceph_connection *con)
771 {
772 struct ceph_msg *msg = con->out_msg;
773 unsigned data_len = le32_to_cpu(msg->hdr.data_len);
774 size_t len;
775 int crc = con->msgr->nocrc;
776 int ret;
777 int total_max_write;
778 int in_trail = 0;
779 size_t trail_len = (msg->trail ? msg->trail->length : 0);
780
781 dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
782 con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
783 con->out_msg_pos.page_pos);
784
785 #ifdef CONFIG_BLOCK
786 if (msg->bio && !msg->bio_iter)
787 init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
788 #endif
789
790 while (data_len > con->out_msg_pos.data_pos) {
791 struct page *page = NULL;
792 void *kaddr = NULL;
793 int max_write = PAGE_SIZE;
794 int page_shift = 0;
795
796 total_max_write = data_len - trail_len -
797 con->out_msg_pos.data_pos;
798
799 /*
800 * if we are calculating the data crc (the default), we need
801 * to map the page. if our pages[] has been revoked, use the
802 * zero page.
803 */
804
805 /* have we reached the trail part of the data? */
806 if (con->out_msg_pos.data_pos >= data_len - trail_len) {
807 in_trail = 1;
808
809 total_max_write = data_len - con->out_msg_pos.data_pos;
810
811 page = list_first_entry(&msg->trail->head,
812 struct page, lru);
813 if (crc)
814 kaddr = kmap(page);
815 max_write = PAGE_SIZE;
816 } else if (msg->pages) {
817 page = msg->pages[con->out_msg_pos.page];
818 if (crc)
819 kaddr = kmap(page);
820 } else if (msg->pagelist) {
821 page = list_first_entry(&msg->pagelist->head,
822 struct page, lru);
823 if (crc)
824 kaddr = kmap(page);
825 #ifdef CONFIG_BLOCK
826 } else if (msg->bio) {
827 struct bio_vec *bv;
828
829 bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
830 page = bv->bv_page;
831 page_shift = bv->bv_offset;
832 if (crc)
833 kaddr = kmap(page) + page_shift;
834 max_write = bv->bv_len;
835 #endif
836 } else {
837 page = con->msgr->zero_page;
838 if (crc)
839 kaddr = page_address(con->msgr->zero_page);
840 }
841 len = min_t(int, max_write - con->out_msg_pos.page_pos,
842 total_max_write);
843
844 if (crc && !con->out_msg_pos.did_page_crc) {
845 void *base = kaddr + con->out_msg_pos.page_pos;
846 u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
847
848 BUG_ON(kaddr == NULL);
849 con->out_msg->footer.data_crc =
850 cpu_to_le32(crc32c(tmpcrc, base, len));
851 con->out_msg_pos.did_page_crc = 1;
852 }
853 ret = kernel_sendpage(con->sock, page,
854 con->out_msg_pos.page_pos + page_shift,
855 len,
856 MSG_DONTWAIT | MSG_NOSIGNAL |
857 MSG_MORE);
858
859 if (crc &&
860 (msg->pages || msg->pagelist || msg->bio || in_trail))
861 kunmap(page);
862
863 if (ret == -EAGAIN)
864 ret = 0;
865 if (ret <= 0)
866 goto out;
867
868 con->out_msg_pos.data_pos += ret;
869 con->out_msg_pos.page_pos += ret;
870 if (ret == len) {
871 con->out_msg_pos.page_pos = 0;
872 con->out_msg_pos.page++;
873 con->out_msg_pos.did_page_crc = 0;
874 if (in_trail)
875 list_move_tail(&page->lru,
876 &msg->trail->head);
877 else if (msg->pagelist)
878 list_move_tail(&page->lru,
879 &msg->pagelist->head);
880 #ifdef CONFIG_BLOCK
881 else if (msg->bio)
882 iter_bio_next(&msg->bio_iter, &msg->bio_seg);
883 #endif
884 }
885 }
886
887 dout("write_partial_msg_pages %p msg %p done\n", con, msg);
888
889 /* prepare and queue up footer, too */
890 if (!crc)
891 con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
892 con->out_kvec_bytes = 0;
893 con->out_kvec_left = 0;
894 con->out_kvec_cur = con->out_kvec;
895 prepare_write_message_footer(con, 0);
896 ret = 1;
897 out:
898 return ret;
899 }
900
901 /*
902 * write some zeros
903 */
904 static int write_partial_skip(struct ceph_connection *con)
905 {
906 int ret;
907
908 while (con->out_skip > 0) {
909 struct kvec iov = {
910 .iov_base = page_address(con->msgr->zero_page),
911 .iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
912 };
913
914 ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
915 if (ret <= 0)
916 goto out;
917 con->out_skip -= ret;
918 }
919 ret = 1;
920 out:
921 return ret;
922 }
923
924 /*
925 * Prepare to read connection handshake, or an ack.
926 */
927 static void prepare_read_banner(struct ceph_connection *con)
928 {
929 dout("prepare_read_banner %p\n", con);
930 con->in_base_pos = 0;
931 }
932
933 static void prepare_read_connect(struct ceph_connection *con)
934 {
935 dout("prepare_read_connect %p\n", con);
936 con->in_base_pos = 0;
937 }
938
939 static void prepare_read_ack(struct ceph_connection *con)
940 {
941 dout("prepare_read_ack %p\n", con);
942 con->in_base_pos = 0;
943 }
944
945 static void prepare_read_tag(struct ceph_connection *con)
946 {
947 dout("prepare_read_tag %p\n", con);
948 con->in_base_pos = 0;
949 con->in_tag = CEPH_MSGR_TAG_READY;
950 }
951
952 /*
953 * Prepare to read a message.
954 */
955 static int prepare_read_message(struct ceph_connection *con)
956 {
957 dout("prepare_read_message %p\n", con);
958 BUG_ON(con->in_msg != NULL);
959 con->in_base_pos = 0;
960 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
961 return 0;
962 }
963
964
965 static int read_partial(struct ceph_connection *con,
966 int *to, int size, void *object)
967 {
968 *to += size;
969 while (con->in_base_pos < *to) {
970 int left = *to - con->in_base_pos;
971 int have = size - left;
972 int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
973 if (ret <= 0)
974 return ret;
975 con->in_base_pos += ret;
976 }
977 return 1;
978 }
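
/*
 * Editor's note (usage sketch, not in the original file; "a" and "b" are
 * placeholders): @to is a cumulative cursor into the current byte
 * stream, so a caller reads back-to-back objects with one cursor, as
 * read_partial_banner() does below:
 *
 *	int to = 0;
 *	ret = read_partial(con, &to, sizeof(a), &a);
 *	if (ret <= 0)
 *		goto out;
 *	ret = read_partial(con, &to, sizeof(b), &b);
 *
 * A short read returns 0 with con->in_base_pos recording the progress so
 * far; when more data arrives the same sequence is simply re-run and the
 * already-completed reads fall straight through.
 */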
979
980
981 /*
982 * Read all or part of the connect-side handshake on a new connection
983 */
984 static int read_partial_banner(struct ceph_connection *con)
985 {
986 int ret, to = 0;
987
988 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
989
990 /* peer's banner */
991 ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
992 if (ret <= 0)
993 goto out;
994 ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
995 &con->actual_peer_addr);
996 if (ret <= 0)
997 goto out;
998 ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
999 &con->peer_addr_for_me);
1000 if (ret <= 0)
1001 goto out;
1002 out:
1003 return ret;
1004 }
1005
1006 static int read_partial_connect(struct ceph_connection *con)
1007 {
1008 int ret, to = 0;
1009
1010 dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
1011
1012 ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
1013 if (ret <= 0)
1014 goto out;
1015 ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
1016 con->auth_reply_buf);
1017 if (ret <= 0)
1018 goto out;
1019
1020 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1021 con, (int)con->in_reply.tag,
1022 le32_to_cpu(con->in_reply.connect_seq),
1023 le32_to_cpu(con->in_reply.global_seq));
1024 out:
1025 return ret;
1026
1027 }
1028
1029 /*
1030 * Verify the hello banner looks okay.
1031 */
1032 static int verify_hello(struct ceph_connection *con)
1033 {
1034 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
1035 pr_err("connect to %s got bad banner\n",
1036 ceph_pr_addr(&con->peer_addr.in_addr));
1037 con->error_msg = "protocol error, bad banner";
1038 return -1;
1039 }
1040 return 0;
1041 }
1042
1043 static bool addr_is_blank(struct sockaddr_storage *ss)
1044 {
1045 switch (ss->ss_family) {
1046 case AF_INET:
1047 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
1048 case AF_INET6:
1049 return
1050 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
1051 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
1052 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
1053 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
1054 }
1055 return false;
1056 }
1057
1058 static int addr_port(struct sockaddr_storage *ss)
1059 {
1060 switch (ss->ss_family) {
1061 case AF_INET:
1062 return ntohs(((struct sockaddr_in *)ss)->sin_port);
1063 case AF_INET6:
1064 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
1065 }
1066 return 0;
1067 }
1068
1069 static void addr_set_port(struct sockaddr_storage *ss, int p)
1070 {
1071 switch (ss->ss_family) {
1072 case AF_INET:
1073 ((struct sockaddr_in *)ss)->sin_port = htons(p);
1074 case AF_INET6:
1075 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
1076 }
1077 }
1078
1079 /*
1080 * Parse an ip[:port] list into an addr array. Use the default
1081 * monitor port if a port isn't specified.
1082 */
1083 int ceph_parse_ips(const char *c, const char *end,
1084 struct ceph_entity_addr *addr,
1085 int max_count, int *count)
1086 {
1087 int i;
1088 const char *p = c;
1089
1090 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1091 for (i = 0; i < max_count; i++) {
1092 const char *ipend;
1093 struct sockaddr_storage *ss = &addr[i].in_addr;
1094 struct sockaddr_in *in4 = (void *)ss;
1095 struct sockaddr_in6 *in6 = (void *)ss;
1096 int port;
1097 char delim = ',';
1098
1099 if (*p == '[') {
1100 delim = ']';
1101 p++;
1102 }
1103
1104 memset(ss, 0, sizeof(*ss));
1105 if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
1106 delim, &ipend))
1107 ss->ss_family = AF_INET;
1108 else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
1109 delim, &ipend))
1110 ss->ss_family = AF_INET6;
1111 else
1112 goto bad;
1113 p = ipend;
1114
1115 if (delim == ']') {
1116 if (*p != ']') {
1117 dout("missing matching ']'\n");
1118 goto bad;
1119 }
1120 p++;
1121 }
1122
1123 /* port? */
1124 if (p < end && *p == ':') {
1125 port = 0;
1126 p++;
1127 while (p < end && *p >= '0' && *p <= '9') {
1128 port = (port * 10) + (*p - '0');
1129 p++;
1130 }
1131 if (port > 65535 || port == 0)
1132 goto bad;
1133 } else {
1134 port = CEPH_MON_PORT;
1135 }
1136
1137 addr_set_port(ss, port);
1138
1139 dout("parse_ips got %s\n", ceph_pr_addr(ss));
1140
1141 if (p == end)
1142 break;
1143 if (*p != ',')
1144 goto bad;
1145 p++;
1146 }
1147
1148 if (p != end)
1149 goto bad;
1150
1151 if (count)
1152 *count = i + 1;
1153 return 0;
1154
1155 bad:
1156 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
1157 return -EINVAL;
1158 }
1159 EXPORT_SYMBOL(ceph_parse_ips);
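
/*
 * Editor's note (usage sketch, not in the original file; the string and
 * variable names are hypothetical):
 *
 *	const char *s = "1.2.3.4:6789,[::1],10.0.0.1";
 *	struct ceph_entity_addr addrs[3];
 *	int count;
 *
 *	ceph_parse_ips(s, s + strlen(s), addrs, 3, &count);
 *
 * On success the return value is 0, count == 3, addrs[0] carries port
 * 6789, and the two entries given without an explicit port default to
 * CEPH_MON_PORT (the monitor port).
 */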
1160
1161 static int process_banner(struct ceph_connection *con)
1162 {
1163 dout("process_banner on %p\n", con);
1164
1165 if (verify_hello(con) < 0)
1166 return -1;
1167
1168 ceph_decode_addr(&con->actual_peer_addr);
1169 ceph_decode_addr(&con->peer_addr_for_me);
1170
1171 /*
1172 * Make sure the other end is who we wanted. note that the other
1173 * end may not yet know their ip address, so if it's 0.0.0.0, give
1174 * them the benefit of the doubt.
1175 */
1176 if (memcmp(&con->peer_addr, &con->actual_peer_addr,
1177 sizeof(con->peer_addr)) != 0 &&
1178 !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
1179 con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
1180 pr_warning("wrong peer, want %s/%d, got %s/%d\n",
1181 ceph_pr_addr(&con->peer_addr.in_addr),
1182 (int)le32_to_cpu(con->peer_addr.nonce),
1183 ceph_pr_addr(&con->actual_peer_addr.in_addr),
1184 (int)le32_to_cpu(con->actual_peer_addr.nonce));
1185 con->error_msg = "wrong peer at address";
1186 return -1;
1187 }
1188
1189 /*
1190 * did we learn our address?
1191 */
1192 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
1193 int port = addr_port(&con->msgr->inst.addr.in_addr);
1194
1195 memcpy(&con->msgr->inst.addr.in_addr,
1196 &con->peer_addr_for_me.in_addr,
1197 sizeof(con->peer_addr_for_me.in_addr));
1198 addr_set_port(&con->msgr->inst.addr.in_addr, port);
1199 encode_my_addr(con->msgr);
1200 dout("process_banner learned my addr is %s\n",
1201 ceph_pr_addr(&con->msgr->inst.addr.in_addr));
1202 }
1203
1204 set_bit(NEGOTIATING, &con->state);
1205 prepare_read_connect(con);
1206 return 0;
1207 }
1208
1209 static void fail_protocol(struct ceph_connection *con)
1210 {
1211 reset_connection(con);
1212 set_bit(CLOSED, &con->state); /* in case there's queued work */
1213
1214 mutex_unlock(&con->mutex);
1215 if (con->ops->bad_proto)
1216 con->ops->bad_proto(con);
1217 mutex_lock(&con->mutex);
1218 }
1219
1220 static int process_connect(struct ceph_connection *con)
1221 {
1222 u64 sup_feat = con->msgr->supported_features;
1223 u64 req_feat = con->msgr->required_features;
1224 u64 server_feat = le64_to_cpu(con->in_reply.features);
1225 int ret;
1226
1227 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
1228
1229 switch (con->in_reply.tag) {
1230 case CEPH_MSGR_TAG_FEATURES:
1231 pr_err("%s%lld %s feature set mismatch,"
1232 " my %llx < server's %llx, missing %llx\n",
1233 ENTITY_NAME(con->peer_name),
1234 ceph_pr_addr(&con->peer_addr.in_addr),
1235 sup_feat, server_feat, server_feat & ~sup_feat);
1236 con->error_msg = "missing required protocol features";
1237 fail_protocol(con);
1238 return -1;
1239
1240 case CEPH_MSGR_TAG_BADPROTOVER:
1241 pr_err("%s%lld %s protocol version mismatch,"
1242 " my %d != server's %d\n",
1243 ENTITY_NAME(con->peer_name),
1244 ceph_pr_addr(&con->peer_addr.in_addr),
1245 le32_to_cpu(con->out_connect.protocol_version),
1246 le32_to_cpu(con->in_reply.protocol_version));
1247 con->error_msg = "protocol version mismatch";
1248 fail_protocol(con);
1249 return -1;
1250
1251 case CEPH_MSGR_TAG_BADAUTHORIZER:
1252 con->auth_retry++;
1253 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
1254 con->auth_retry);
1255 if (con->auth_retry == 2) {
1256 con->error_msg = "connect authorization failure";
1257 return -1;
1258 }
1259 con->auth_retry = 1;
1260 ret = prepare_write_connect(con->msgr, con, 0);
1261 if (ret < 0)
1262 return ret;
1263 prepare_read_connect(con);
1264 break;
1265
1266 case CEPH_MSGR_TAG_RESETSESSION:
1267 /*
1268 * If we connected with a large connect_seq but the peer
1269 * has no record of a session with us (no connection, or
1270 * connect_seq == 0), they will send RESETSESSION to indicate
1271 * that they must have reset their session, and may have
1272 * dropped messages.
1273 */
1274 dout("process_connect got RESET peer seq %u\n",
1275 le32_to_cpu(con->in_connect.connect_seq));
1276 pr_err("%s%lld %s connection reset\n",
1277 ENTITY_NAME(con->peer_name),
1278 ceph_pr_addr(&con->peer_addr.in_addr));
1279 reset_connection(con);
1280 prepare_write_connect(con->msgr, con, 0);
1281 prepare_read_connect(con);
1282
1283 /* Tell ceph about it. */
1284 mutex_unlock(&con->mutex);
1285 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
1286 if (con->ops->peer_reset)
1287 con->ops->peer_reset(con);
1288 mutex_lock(&con->mutex);
1289 if (test_bit(CLOSED, &con->state) ||
1290 test_bit(OPENING, &con->state))
1291 return -EAGAIN;
1292 break;
1293
1294 case CEPH_MSGR_TAG_RETRY_SESSION:
1295 /*
1296 * If we sent a smaller connect_seq than the peer has, try
1297 * again with a larger value.
1298 */
1299 dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
1300 le32_to_cpu(con->out_connect.connect_seq),
1301 le32_to_cpu(con->in_connect.connect_seq));
1302 con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
1303 prepare_write_connect(con->msgr, con, 0);
1304 prepare_read_connect(con);
1305 break;
1306
1307 case CEPH_MSGR_TAG_RETRY_GLOBAL:
1308 /*
1309 * If we sent a smaller global_seq than the peer has, try
1310 * again with a larger value.
1311 */
1312 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
1313 con->peer_global_seq,
1314 le32_to_cpu(con->in_connect.global_seq));
1315 get_global_seq(con->msgr,
1316 le32_to_cpu(con->in_connect.global_seq));
1317 prepare_write_connect(con->msgr, con, 0);
1318 prepare_read_connect(con);
1319 break;
1320
1321 case CEPH_MSGR_TAG_READY:
1322 if (req_feat & ~server_feat) {
1323 pr_err("%s%lld %s protocol feature mismatch,"
1324 " my required %llx > server's %llx, need %llx\n",
1325 ENTITY_NAME(con->peer_name),
1326 ceph_pr_addr(&con->peer_addr.in_addr),
1327 req_feat, server_feat, req_feat & ~server_feat);
1328 con->error_msg = "missing required protocol features";
1329 fail_protocol(con);
1330 return -1;
1331 }
1332 clear_bit(CONNECTING, &con->state);
1333 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
1334 con->connect_seq++;
1335 con->peer_features = server_feat;
1336 dout("process_connect got READY gseq %d cseq %d (%d)\n",
1337 con->peer_global_seq,
1338 le32_to_cpu(con->in_reply.connect_seq),
1339 con->connect_seq);
1340 WARN_ON(con->connect_seq !=
1341 le32_to_cpu(con->in_reply.connect_seq));
1342
1343 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
1344 set_bit(LOSSYTX, &con->state);
1345
1346 prepare_read_tag(con);
1347 break;
1348
1349 case CEPH_MSGR_TAG_WAIT:
1350 /*
1351 * If there is a connection race (we are opening
1352 * connections to each other), one of us may just have
1353 * to WAIT. This shouldn't happen if we are the
1354 * client.
1355 */
1356 pr_err("process_connect peer connecting WAIT\n");
1357
1358 default:
1359 pr_err("connect protocol error, will retry\n");
1360 con->error_msg = "protocol error, garbage tag during connect";
1361 return -1;
1362 }
1363 return 0;
1364 }
1365
1366
1367 /*
1368 * read (part of) an ack
1369 */
1370 static int read_partial_ack(struct ceph_connection *con)
1371 {
1372 int to = 0;
1373
1374 return read_partial(con, &to, sizeof(con->in_temp_ack),
1375 &con->in_temp_ack);
1376 }
1377
1378
1379 /*
1380 * We can finally discard anything that's been acked.
1381 */
1382 static void process_ack(struct ceph_connection *con)
1383 {
1384 struct ceph_msg *m;
1385 u64 ack = le64_to_cpu(con->in_temp_ack);
1386 u64 seq;
1387
1388 while (!list_empty(&con->out_sent)) {
1389 m = list_first_entry(&con->out_sent, struct ceph_msg,
1390 list_head);
1391 seq = le64_to_cpu(m->hdr.seq);
1392 if (seq > ack)
1393 break;
1394 dout("got ack for seq %llu type %d at %p\n", seq,
1395 le16_to_cpu(m->hdr.type), m);
1396 ceph_msg_remove(m);
1397 }
1398 prepare_read_tag(con);
1399 }
1400
1401
1402
1403
1404 static int read_partial_message_section(struct ceph_connection *con,
1405 struct kvec *section,
1406 unsigned int sec_len, u32 *crc)
1407 {
1408 int ret, left;
1409
1410 BUG_ON(!section);
1411
1412 while (section->iov_len < sec_len) {
1413 BUG_ON(section->iov_base == NULL);
1414 left = sec_len - section->iov_len;
1415 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
1416 section->iov_len, left);
1417 if (ret <= 0)
1418 return ret;
1419 section->iov_len += ret;
1420 if (section->iov_len == sec_len)
1421 *crc = crc32c(0, section->iov_base,
1422 section->iov_len);
1423 }
1424
1425 return 1;
1426 }
1427
1428 static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
1429 struct ceph_msg_header *hdr,
1430 int *skip);
1431
1432
1433 static int read_partial_message_pages(struct ceph_connection *con,
1434 struct page **pages,
1435 unsigned data_len, int datacrc)
1436 {
1437 void *p;
1438 int ret;
1439 int left;
1440
1441 left = min((int)(data_len - con->in_msg_pos.data_pos),
1442 (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
1443 /* (page) data */
1444 BUG_ON(pages == NULL);
1445 p = kmap(pages[con->in_msg_pos.page]);
1446 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
1447 left);
1448 if (ret > 0 && datacrc)
1449 con->in_data_crc =
1450 crc32c(con->in_data_crc,
1451 p + con->in_msg_pos.page_pos, ret);
1452 kunmap(pages[con->in_msg_pos.page]);
1453 if (ret <= 0)
1454 return ret;
1455 con->in_msg_pos.data_pos += ret;
1456 con->in_msg_pos.page_pos += ret;
1457 if (con->in_msg_pos.page_pos == PAGE_SIZE) {
1458 con->in_msg_pos.page_pos = 0;
1459 con->in_msg_pos.page++;
1460 }
1461
1462 return ret;
1463 }
1464
1465 #ifdef CONFIG_BLOCK
1466 static int read_partial_message_bio(struct ceph_connection *con,
1467 struct bio **bio_iter, int *bio_seg,
1468 unsigned data_len, int datacrc)
1469 {
1470 struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
1471 void *p;
1472 int ret, left;
1473
1474 if (IS_ERR(bv))
1475 return PTR_ERR(bv);
1476
1477 left = min((int)(data_len - con->in_msg_pos.data_pos),
1478 (int)(bv->bv_len - con->in_msg_pos.page_pos));
1479
1480 p = kmap(bv->bv_page) + bv->bv_offset;
1481
1482 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
1483 left);
1484 if (ret > 0 && datacrc)
1485 con->in_data_crc =
1486 crc32c(con->in_data_crc,
1487 p + con->in_msg_pos.page_pos, ret);
1488 kunmap(bv->bv_page);
1489 if (ret <= 0)
1490 return ret;
1491 con->in_msg_pos.data_pos += ret;
1492 con->in_msg_pos.page_pos += ret;
1493 if (con->in_msg_pos.page_pos == bv->bv_len) {
1494 con->in_msg_pos.page_pos = 0;
1495 iter_bio_next(bio_iter, bio_seg);
1496 }
1497
1498 return ret;
1499 }
1500 #endif
1501
1502 /*
1503 * read (part of) a message.
1504 */
1505 static int read_partial_message(struct ceph_connection *con)
1506 {
1507 struct ceph_msg *m = con->in_msg;
1508 int ret;
1509 int to, left;
1510 unsigned front_len, middle_len, data_len;
1511 int datacrc = con->msgr->nocrc;
1512 int skip;
1513 u64 seq;
1514
1515 dout("read_partial_message con %p msg %p\n", con, m);
1516
1517 /* header */
1518 while (con->in_base_pos < sizeof(con->in_hdr)) {
1519 left = sizeof(con->in_hdr) - con->in_base_pos;
1520 ret = ceph_tcp_recvmsg(con->sock,
1521 (char *)&con->in_hdr + con->in_base_pos,
1522 left);
1523 if (ret <= 0)
1524 return ret;
1525 con->in_base_pos += ret;
1526 if (con->in_base_pos == sizeof(con->in_hdr)) {
1527 u32 crc = crc32c(0, (void *)&con->in_hdr,
1528 sizeof(con->in_hdr) - sizeof(con->in_hdr.crc));
1529 if (crc != le32_to_cpu(con->in_hdr.crc)) {
1530 pr_err("read_partial_message bad hdr "
1531 " crc %u != expected %u\n",
1532 crc, con->in_hdr.crc);
1533 return -EBADMSG;
1534 }
1535 }
1536 }
1537 front_len = le32_to_cpu(con->in_hdr.front_len);
1538 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
1539 return -EIO;
1540 middle_len = le32_to_cpu(con->in_hdr.middle_len);
1541 if (middle_len > CEPH_MSG_MAX_DATA_LEN)
1542 return -EIO;
1543 data_len = le32_to_cpu(con->in_hdr.data_len);
1544 if (data_len > CEPH_MSG_MAX_DATA_LEN)
1545 return -EIO;
1546
1547 /* verify seq# */
1548 seq = le64_to_cpu(con->in_hdr.seq);
1549 if ((s64)seq - (s64)con->in_seq < 1) {
1550 pr_info("skipping %s%lld %s seq %lld expected %lld\n",
1551 ENTITY_NAME(con->peer_name),
1552 ceph_pr_addr(&con->peer_addr.in_addr),
1553 seq, con->in_seq + 1);
1554 con->in_base_pos = -front_len - middle_len - data_len -
1555 sizeof(m->footer);
1556 con->in_tag = CEPH_MSGR_TAG_READY;
1557 return 0;
1558 } else if ((s64)seq - (s64)con->in_seq > 1) {
1559 pr_err("read_partial_message bad seq %lld expected %lld\n",
1560 seq, con->in_seq + 1);
1561 con->error_msg = "bad message sequence # for incoming message";
1562 return -EBADMSG;
1563 }
1564
1565 /* allocate message? */
1566 if (!con->in_msg) {
1567 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
1568 con->in_hdr.front_len, con->in_hdr.data_len);
1569 skip = 0;
1570 con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
1571 if (skip) {
1572 /* skip this message */
1573 dout("alloc_msg said skip message\n");
1574 BUG_ON(con->in_msg);
1575 con->in_base_pos = -front_len - middle_len - data_len -
1576 sizeof(m->footer);
1577 con->in_tag = CEPH_MSGR_TAG_READY;
1578 con->in_seq++;
1579 return 0;
1580 }
1581 if (!con->in_msg) {
1582 con->error_msg =
1583 "error allocating memory for incoming message";
1584 return -ENOMEM;
1585 }
1586 m = con->in_msg;
1587 m->front.iov_len = 0; /* haven't read it yet */
1588 if (m->middle)
1589 m->middle->vec.iov_len = 0;
1590
1591 con->in_msg_pos.page = 0;
1592 if (m->pages)
1593 con->in_msg_pos.page_pos = m->page_alignment;
1594 else
1595 con->in_msg_pos.page_pos = 0;
1596 con->in_msg_pos.data_pos = 0;
1597 }
1598
1599 /* front */
1600 ret = read_partial_message_section(con, &m->front, front_len,
1601 &con->in_front_crc);
1602 if (ret <= 0)
1603 return ret;
1604
1605 /* middle */
1606 if (m->middle) {
1607 ret = read_partial_message_section(con, &m->middle->vec,
1608 middle_len,
1609 &con->in_middle_crc);
1610 if (ret <= 0)
1611 return ret;
1612 }
1613 #ifdef CONFIG_BLOCK
1614 if (m->bio && !m->bio_iter)
1615 init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
1616 #endif
1617
1618 /* (page) data */
1619 while (con->in_msg_pos.data_pos < data_len) {
1620 if (m->pages) {
1621 ret = read_partial_message_pages(con, m->pages,
1622 data_len, datacrc);
1623 if (ret <= 0)
1624 return ret;
1625 #ifdef CONFIG_BLOCK
1626 } else if (m->bio) {
1627
1628 ret = read_partial_message_bio(con,
1629 &m->bio_iter, &m->bio_seg,
1630 data_len, datacrc);
1631 if (ret <= 0)
1632 return ret;
1633 #endif
1634 } else {
1635 BUG_ON(1);
1636 }
1637 }
1638
1639 /* footer */
1640 to = sizeof(m->hdr) + sizeof(m->footer);
1641 while (con->in_base_pos < to) {
1642 left = to - con->in_base_pos;
1643 ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
1644 (con->in_base_pos - sizeof(m->hdr)),
1645 left);
1646 if (ret <= 0)
1647 return ret;
1648 con->in_base_pos += ret;
1649 }
1650 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
1651 m, front_len, m->footer.front_crc, middle_len,
1652 m->footer.middle_crc, data_len, m->footer.data_crc);
1653
1654 /* crc ok? */
1655 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
1656 pr_err("read_partial_message %p front crc %u != exp. %u\n",
1657 m, con->in_front_crc, m->footer.front_crc);
1658 return -EBADMSG;
1659 }
1660 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
1661 pr_err("read_partial_message %p middle crc %u != exp %u\n",
1662 m, con->in_middle_crc, m->footer.middle_crc);
1663 return -EBADMSG;
1664 }
1665 if (datacrc &&
1666 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
1667 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
1668 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
1669 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
1670 return -EBADMSG;
1671 }
1672
1673 return 1; /* done! */
1674 }
1675
1676 /*
1677 * Process message. This happens in the worker thread. The callback should
1678 * be careful not to do anything that waits on other incoming messages or it
1679 * may deadlock.
1680 */
1681 static void process_message(struct ceph_connection *con)
1682 {
1683 struct ceph_msg *msg;
1684
1685 msg = con->in_msg;
1686 con->in_msg = NULL;
1687
1688 /* if first message, set peer_name */
1689 if (con->peer_name.type == 0)
1690 con->peer_name = msg->hdr.src;
1691
1692 con->in_seq++;
1693 mutex_unlock(&con->mutex);
1694
1695 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
1696 msg, le64_to_cpu(msg->hdr.seq),
1697 ENTITY_NAME(msg->hdr.src),
1698 le16_to_cpu(msg->hdr.type),
1699 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1700 le32_to_cpu(msg->hdr.front_len),
1701 le32_to_cpu(msg->hdr.data_len),
1702 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
1703 con->ops->dispatch(con, msg);
1704
1705 mutex_lock(&con->mutex);
1706 prepare_read_tag(con);
1707 }
1708
1709
1710 /*
1711 * Write something to the socket. Called in a worker thread when the
1712 * socket appears to be writeable and we have something ready to send.
1713 */
1714 static int try_write(struct ceph_connection *con)
1715 {
1716 struct ceph_messenger *msgr = con->msgr;
1717 int ret = 1;
1718
1719 dout("try_write start %p state %lu nref %d\n", con, con->state,
1720 atomic_read(&con->nref));
1721
1722 more:
1723 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
1724
1725 /* open the socket first? */
1726 if (con->sock == NULL) {
1727 prepare_write_banner(msgr, con);
1728 prepare_write_connect(msgr, con, 1);
1729 prepare_read_banner(con);
1730 set_bit(CONNECTING, &con->state);
1731 clear_bit(NEGOTIATING, &con->state);
1732
1733 BUG_ON(con->in_msg);
1734 con->in_tag = CEPH_MSGR_TAG_READY;
1735 dout("try_write initiating connect on %p new state %lu\n",
1736 con, con->state);
1737 con->sock = ceph_tcp_connect(con);
1738 if (IS_ERR(con->sock)) {
1739 con->sock = NULL;
1740 con->error_msg = "connect error";
1741 ret = -1;
1742 goto out;
1743 }
1744 }
1745
1746 more_kvec:
1747 /* kvec data queued? */
1748 if (con->out_skip) {
1749 ret = write_partial_skip(con);
1750 if (ret <= 0)
1751 goto out;
1752 }
1753 if (con->out_kvec_left) {
1754 ret = write_partial_kvec(con);
1755 if (ret <= 0)
1756 goto out;
1757 }
1758
1759 /* msg pages? */
1760 if (con->out_msg) {
1761 if (con->out_msg_done) {
1762 ceph_msg_put(con->out_msg);
1763 con->out_msg = NULL; /* we're done with this one */
1764 goto do_next;
1765 }
1766
1767 ret = write_partial_msg_pages(con);
1768 if (ret == 1)
1769 goto more_kvec; /* we need to send the footer, too! */
1770 if (ret == 0)
1771 goto out;
1772 if (ret < 0) {
1773 dout("try_write write_partial_msg_pages err %d\n",
1774 ret);
1775 goto out;
1776 }
1777 }
1778
1779 do_next:
1780 if (!test_bit(CONNECTING, &con->state)) {
1781 /* is anything else pending? */
1782 if (!list_empty(&con->out_queue)) {
1783 prepare_write_message(con);
1784 goto more;
1785 }
1786 if (con->in_seq > con->in_seq_acked) {
1787 prepare_write_ack(con);
1788 goto more;
1789 }
1790 if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
1791 prepare_write_keepalive(con);
1792 goto more;
1793 }
1794 }
1795
1796 /* Nothing to do! */
1797 clear_bit(WRITE_PENDING, &con->state);
1798 dout("try_write nothing else to write.\n");
1799 ret = 0;
1800 out:
1801 dout("try_write done on %p ret %d\n", con, ret);
1802 return ret;
1803 }
1804
1805
1806
1807 /*
1808 * Read what we can from the socket.
1809 */
1810 static int try_read(struct ceph_connection *con)
1811 {
1812 int ret = -1;
1813
1814 if (!con->sock)
1815 return 0;
1816
1817 if (test_bit(STANDBY, &con->state))
1818 return 0;
1819
1820 dout("try_read start on %p\n", con);
1821
1822 more:
1823 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
1824 con->in_base_pos);
1825
1826 /*
1827 * process_connect and process_message drop and re-take
1828 * con->mutex. make sure we handle a racing close or reopen.
1829 */
1830 if (test_bit(CLOSED, &con->state) ||
1831 test_bit(OPENING, &con->state)) {
1832 ret = -EAGAIN;
1833 goto out;
1834 }
1835
1836 if (test_bit(CONNECTING, &con->state)) {
1837 if (!test_bit(NEGOTIATING, &con->state)) {
1838 dout("try_read connecting\n");
1839 ret = read_partial_banner(con);
1840 if (ret <= 0)
1841 goto out;
1842 ret = process_banner(con);
1843 if (ret < 0)
1844 goto out;
1845 }
1846 ret = read_partial_connect(con);
1847 if (ret <= 0)
1848 goto out;
1849 ret = process_connect(con);
1850 if (ret < 0)
1851 goto out;
1852 goto more;
1853 }
1854
1855 if (con->in_base_pos < 0) {
1856 /*
1857 * skipping + discarding content.
1858 *
1859 * FIXME: there must be a better way to do this!
1860 */
1861 static char buf[1024];
1862 int skip = min(1024, -con->in_base_pos);
1863 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
1864 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
1865 if (ret <= 0)
1866 goto out;
1867 con->in_base_pos += ret;
1868 if (con->in_base_pos)
1869 goto more;
1870 }
1871 if (con->in_tag == CEPH_MSGR_TAG_READY) {
1872 /*
1873 * what's next?
1874 */
1875 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
1876 if (ret <= 0)
1877 goto out;
1878 dout("try_read got tag %d\n", (int)con->in_tag);
1879 switch (con->in_tag) {
1880 case CEPH_MSGR_TAG_MSG:
1881 prepare_read_message(con);
1882 break;
1883 case CEPH_MSGR_TAG_ACK:
1884 prepare_read_ack(con);
1885 break;
1886 case CEPH_MSGR_TAG_CLOSE:
1887 set_bit(CLOSED, &con->state); /* fixme */
1888 goto out;
1889 default:
1890 goto bad_tag;
1891 }
1892 }
1893 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
1894 ret = read_partial_message(con);
1895 if (ret <= 0) {
1896 switch (ret) {
1897 case -EBADMSG:
1898 con->error_msg = "bad crc";
1899 ret = -EIO;
1900 break;
1901 case -EIO:
1902 con->error_msg = "io error";
1903 break;
1904 }
1905 goto out;
1906 }
1907 if (con->in_tag == CEPH_MSGR_TAG_READY)
1908 goto more;
1909 process_message(con);
1910 goto more;
1911 }
1912 if (con->in_tag == CEPH_MSGR_TAG_ACK) {
1913 ret = read_partial_ack(con);
1914 if (ret <= 0)
1915 goto out;
1916 process_ack(con);
1917 goto more;
1918 }
1919
1920 out:
1921 dout("try_read done on %p ret %d\n", con, ret);
1922 return ret;
1923
1924 bad_tag:
1925 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
1926 con->error_msg = "protocol error, garbage tag";
1927 ret = -1;
1928 goto out;
1929 }
1930
1931
1932 /*
1933 * Atomically queue work on a connection. Bump @con reference to
1934 * avoid races with connection teardown.
1935 */
1936 static void queue_con(struct ceph_connection *con)
1937 {
1938 if (test_bit(DEAD, &con->state)) {
1939 dout("queue_con %p ignoring: DEAD\n",
1940 con);
1941 return;
1942 }
1943
1944 if (!con->ops->get(con)) {
1945 dout("queue_con %p ref count 0\n", con);
1946 return;
1947 }
1948
1949 if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
1950 dout("queue_con %p - already queued\n", con);
1951 con->ops->put(con);
1952 } else {
1953 dout("queue_con %p\n", con);
1954 }
1955 }
1956
1957 /*
1958 * Do some work on a connection. Drop a connection ref when we're done.
1959 */
1960 static void con_work(struct work_struct *work)
1961 {
1962 struct ceph_connection *con = container_of(work, struct ceph_connection,
1963 work.work);
1964 int ret;
1965
1966 mutex_lock(&con->mutex);
1967 restart:
1968 if (test_and_clear_bit(BACKOFF, &con->state)) {
1969 dout("con_work %p backing off\n", con);
1970 if (queue_delayed_work(ceph_msgr_wq, &con->work,
1971 round_jiffies_relative(con->delay))) {
1972 dout("con_work %p backoff %lu\n", con, con->delay);
1973 mutex_unlock(&con->mutex);
1974 return;
1975 } else {
1976 con->ops->put(con);
1977 dout("con_work %p FAILED to back off %lu\n", con,
1978 con->delay);
1979 }
1980 }
1981
1982 if (test_bit(STANDBY, &con->state)) {
1983 dout("con_work %p STANDBY\n", con);
1984 goto done;
1985 }
1986 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
1987 dout("con_work CLOSED\n");
1988 con_close_socket(con);
1989 goto done;
1990 }
1991 if (test_and_clear_bit(OPENING, &con->state)) {
1992 /* reopen w/ new peer */
1993 dout("con_work OPENING\n");
1994 con_close_socket(con);
1995 }
1996
1997 if (test_and_clear_bit(SOCK_CLOSED, &con->state))
1998 goto fault;
1999
2000 ret = try_read(con);
2001 if (ret == -EAGAIN)
2002 goto restart;
2003 if (ret < 0)
2004 goto fault;
2005
2006 ret = try_write(con);
2007 if (ret == -EAGAIN)
2008 goto restart;
2009 if (ret < 0)
2010 goto fault;
2011
2012 done:
2013 mutex_unlock(&con->mutex);
2014 done_unlocked:
2015 con->ops->put(con);
2016 return;
2017
2018 fault:
2019 mutex_unlock(&con->mutex);
2020 ceph_fault(con); /* error/fault path */
2021 goto done_unlocked;
2022 }
2023
2024
2025 /*
2026 * Generic error/fault handler. A retry mechanism is used with
2027 * exponential backoff
2028 */
2029 static void ceph_fault(struct ceph_connection *con)
2030 {
2031 pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2032 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2033 dout("fault %p state %lu to peer %s\n",
2034 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
2035
2036 if (test_bit(LOSSYTX, &con->state)) {
2037 dout("fault on LOSSYTX channel\n");
2038 goto out;
2039 }
2040
2041 mutex_lock(&con->mutex);
2042 if (test_bit(CLOSED, &con->state))
2043 goto out_unlock;
2044
2045 con_close_socket(con);
2046
2047 if (con->in_msg) {
2048 ceph_msg_put(con->in_msg);
2049 con->in_msg = NULL;
2050 }
2051
2052 /* Requeue anything that hasn't been acked */
2053 list_splice_init(&con->out_sent, &con->out_queue);
2054
2055 /* If there are no messages queued or keepalive pending, place
2056 * the connection in a STANDBY state */
2057 if (list_empty(&con->out_queue) &&
2058 !test_bit(KEEPALIVE_PENDING, &con->state)) {
2059 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
2060 clear_bit(WRITE_PENDING, &con->state);
2061 set_bit(STANDBY, &con->state);
2062 } else {
2063 /* retry after a delay. */
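/*
 * Exponential backoff: the first retry waits BASE_DELAY_INTERVAL,
 * and each further fault doubles the delay until it reaches
 * MAX_DELAY_INTERVAL.
 */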
2064 if (con->delay == 0)
2065 con->delay = BASE_DELAY_INTERVAL;
2066 else if (con->delay < MAX_DELAY_INTERVAL)
2067 con->delay *= 2;
2068 con->ops->get(con);
2069 if (queue_delayed_work(ceph_msgr_wq, &con->work,
2070 round_jiffies_relative(con->delay))) {
2071 dout("fault queued %p delay %lu\n", con, con->delay);
2072 } else {
2073 con->ops->put(con);
2074 dout("fault failed to queue %p delay %lu, backoff\n",
2075 con, con->delay);
2076 /*
2077 * In many cases we see a socket state change
2078 * while con_work is running and end up
2079 * queuing (non-delayed) work, such that we
2080 * can't backoff with a delay. Set a flag so
2081 * that when con_work restarts we schedule the
2082 * delay then.
2083 */
2084 set_bit(BACKOFF, &con->state);
2085 }
2086 }
2087
2088 out_unlock:
2089 mutex_unlock(&con->mutex);
2090 out:
2091 /*
2092 * in case we faulted due to authentication, invalidate our
2093 * current tickets so that we can get new ones.
2094 */
2095 if (con->auth_retry && con->ops->invalidate_authorizer) {
2096 dout("calling invalidate_authorizer()\n");
2097 con->ops->invalidate_authorizer(con);
2098 }
2099
2100 if (con->ops->fault)
2101 con->ops->fault(con);
2102 }
2103
2104
2105
2106 /*
2107 * create a new messenger instance
2108 */
2109 struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
2110 u32 supported_features,
2111 u32 required_features)
2112 {
2113 struct ceph_messenger *msgr;
2114
2115 msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
2116 if (msgr == NULL)
2117 return ERR_PTR(-ENOMEM);
2118
2119 msgr->supported_features = supported_features;
2120 msgr->required_features = required_features;
2121
2122 spin_lock_init(&msgr->global_seq_lock);
2123
2124 /* the zero page is needed if a request is "canceled" while the message
2125 * is being written over the socket */
2126 msgr->zero_page = __page_cache_alloc(GFP_KERNEL | __GFP_ZERO);
2127 if (!msgr->zero_page) {
2128 kfree(msgr);
2129 return ERR_PTR(-ENOMEM);
2130 }
2131 kmap(msgr->zero_page);
2132
2133 if (myaddr)
2134 msgr->inst.addr = *myaddr;
2135
2136 /* select a random nonce */
2137 msgr->inst.addr.type = 0;
2138 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
2139 encode_my_addr(msgr);
2140
2141 dout("messenger_create %p\n", msgr);
2142 return msgr;
2143 }
2144 EXPORT_SYMBOL(ceph_messenger_create);
2145
2146 void ceph_messenger_destroy(struct ceph_messenger *msgr)
2147 {
2148 dout("destroy %p\n", msgr);
2149 kunmap(msgr->zero_page);
2150 __free_page(msgr->zero_page);
2151 kfree(msgr);
2152 dout("destroyed messenger %p\n", msgr);
2153 }
2154 EXPORT_SYMBOL(ceph_messenger_destroy);
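/*
 * Example (illustrative sketch, not part of messenger.c):
 * ceph_messenger_create() returns an ERR_PTR on failure, so callers must
 * check with IS_ERR()/PTR_ERR() rather than testing for NULL.  The wrapper
 * below is a placeholder; real callers pass their client's feature masks.
 */
static struct ceph_messenger *example_messenger(struct ceph_entity_addr *addr,
						u32 supported, u32 required)
{
	struct ceph_messenger *msgr;

	msgr = ceph_messenger_create(addr, supported, required);
	if (IS_ERR(msgr)) {
		pr_err("messenger create failed: %ld\n", PTR_ERR(msgr));
		return NULL;
	}
	return msgr;	/* pair with ceph_messenger_destroy() */
}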
2155
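/*
 * Wake a connection that ceph_fault() parked in STANDBY: bump connect_seq
 * for the upcoming reconnect.  Callers do this before setting
 * WRITE_PENDING/KEEPALIVE_PENDING, hence the WARN_ONs below.
 */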
2156 static void clear_standby(struct ceph_connection *con)
2157 {
2158 /* come back from STANDBY? */
2159 if (test_and_clear_bit(STANDBY, &con->state)) {
2160 mutex_lock(&con->mutex);
2161 dout("clear_standby %p and ++connect_seq\n", con);
2162 con->connect_seq++;
2163 WARN_ON(test_bit(WRITE_PENDING, &con->state));
2164 WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
2165 mutex_unlock(&con->mutex);
2166 }
2167 }
2168
2169 /*
2170 * Queue up an outgoing message on the given connection.
2171 */
2172 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
2173 {
2174 if (test_bit(CLOSED, &con->state)) {
2175 dout("con_send %p closed, dropping %p\n", con, msg);
2176 ceph_msg_put(msg);
2177 return;
2178 }
2179
2180 /* set src */
2181 msg->hdr.src = con->msgr->inst.name;
2182
2183 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
2184
2185 msg->needs_out_seq = true;
2186
2187 /* queue */
2188 mutex_lock(&con->mutex);
2189 BUG_ON(!list_empty(&msg->list_head));
2190 list_add_tail(&msg->list_head, &con->out_queue);
2191 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
2192 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
2193 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2194 le32_to_cpu(msg->hdr.front_len),
2195 le32_to_cpu(msg->hdr.middle_len),
2196 le32_to_cpu(msg->hdr.data_len));
2197 mutex_unlock(&con->mutex);
2198
2199 /* if there wasn't anything waiting to send before, queue
2200 * new work */
2201 clear_standby(con);
2202 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
2203 queue_con(con);
2204 }
2205 EXPORT_SYMBOL(ceph_con_send);
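/*
 * Example (illustrative sketch, not part of messenger.c): the minimal
 * caller side of ceph_con_send().  CEPH_MSG_PING is a real message type;
 * the function name and the assumption that @con is already open are
 * placeholders for this example.
 */
static int example_send_ping(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	/* no front, middle or data payload */
	msg = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS);
	if (!msg)
		return -ENOMEM;

	/* hands our reference to the connection; it is dropped once the
	 * message is acked (or immediately if the con is CLOSED) */
	ceph_con_send(con, msg);
	return 0;
}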
2206
2207 /*
2208 * Revoke a message that was previously queued for send
2209 */
2210 void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
2211 {
2212 mutex_lock(&con->mutex);
2213 if (!list_empty(&msg->list_head)) {
2214 dout("con_revoke %p msg %p - was on queue\n", con, msg);
2215 list_del_init(&msg->list_head);
2216 ceph_msg_put(msg);
2217 msg->hdr.seq = 0;
2218 }
2219 if (con->out_msg == msg) {
2220 dout("con_revoke %p msg %p - was sending\n", con, msg);
2221 con->out_msg = NULL;
2222 if (con->out_kvec_is_msg) {
2223 con->out_skip = con->out_kvec_bytes;
2224 con->out_kvec_is_msg = false;
2225 }
2226 ceph_msg_put(msg);
2227 msg->hdr.seq = 0;
2228 }
2229 mutex_unlock(&con->mutex);
2230 }
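/*
 * Example (illustrative sketch): callers that may need to revoke keep a
 * private reference and hand a second one to the connection, mirroring
 * the resend pattern used by mon_client/osd_client.  The function name
 * is a placeholder.
 */
static void example_send_then_revoke(struct ceph_connection *con,
				     struct ceph_msg *msg)
{
	ceph_con_send(con, ceph_msg_get(msg));	/* con now owns one ref */

	/* ... later, e.g. before resending or on timeout ... */

	ceph_con_revoke(con, msg);	/* drops the queued ref, if any */
	ceph_msg_put(msg);		/* drop our private ref */
}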
2231
2232 /*
2233 * Revoke a message that we may be reading data into
2234 */
2235 void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
2236 {
2237 mutex_lock(&con->mutex);
2238 if (con->in_msg && con->in_msg == msg) {
2239 unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
2240 unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
2241 unsigned data_len = le32_to_cpu(con->in_hdr.data_len);
2242
2243 /* skip rest of message */
2244 dout("con_revoke_pages %p msg %p revoked\n", con, msg);
2245 con->in_base_pos = con->in_base_pos -
2246 sizeof(struct ceph_msg_header) -
2247 front_len -
2248 middle_len -
2249 data_len -
2250 sizeof(struct ceph_msg_footer);
2251 ceph_msg_put(con->in_msg);
2252 con->in_msg = NULL;
2253 con->in_tag = CEPH_MSGR_TAG_READY;
2254 con->in_seq++;
2255 } else {
2256 dout("con_revoke_pages %p msg %p pages %p no-op\n",
2257 con, con->in_msg, msg);
2258 }
2259 mutex_unlock(&con->mutex);
2260 }
2261
2262 /*
2263 * Queue a keepalive byte to ensure the tcp connection is alive.
2264 */
2265 void ceph_con_keepalive(struct ceph_connection *con)
2266 {
2267 dout("con_keepalive %p\n", con);
2268 clear_standby(con);
2269 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
2270 test_and_set_bit(WRITE_PENDING, &con->state) == 0)
2271 queue_con(con);
2272 }
2273 EXPORT_SYMBOL(ceph_con_keepalive);
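/*
 * Example (illustrative sketch): keepalives are normally driven by the
 * caller's own delayed work, as mon_client does.  struct example_client,
 * its fields and the 30 second interval are placeholders.
 */
struct example_client {
	struct ceph_connection	*con;
	struct delayed_work	tick;
};

static void example_keepalive_tick(struct work_struct *work)
{
	struct example_client *c =
		container_of(work, struct example_client, tick.work);

	ceph_con_keepalive(c->con);	/* queue the keepalive tag byte */
	schedule_delayed_work(&c->tick, round_jiffies_relative(30 * HZ));
}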
2274
2275
2276 /*
2277 * construct a new message with the given type and front length;
2278 * the new msg has a ref count of 1.
2279 */
2280 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
2281 {
2282 struct ceph_msg *m;
2283
2284 m = kmalloc(sizeof(*m), flags);
2285 if (m == NULL)
2286 goto out;
2287 kref_init(&m->kref);
2288 INIT_LIST_HEAD(&m->list_head);
2289
2290 m->hdr.tid = 0;
2291 m->hdr.type = cpu_to_le16(type);
2292 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
2293 m->hdr.version = 0;
2294 m->hdr.front_len = cpu_to_le32(front_len);
2295 m->hdr.middle_len = 0;
2296 m->hdr.data_len = 0;
2297 m->hdr.data_off = 0;
2298 m->hdr.reserved = 0;
2299 m->footer.front_crc = 0;
2300 m->footer.middle_crc = 0;
2301 m->footer.data_crc = 0;
2302 m->footer.flags = 0;
2303 m->front_max = front_len;
2304 m->front_is_vmalloc = false;
2305 m->more_to_follow = false;
2306 m->pool = NULL;
2307
2308 /* middle */
2309 m->middle = NULL;
2310
2311 /* data */
2312 m->nr_pages = 0;
2313 m->page_alignment = 0;
2314 m->pages = NULL;
2315 m->pagelist = NULL;
2316 m->bio = NULL;
2317 m->bio_iter = NULL;
2318 m->bio_seg = 0;
2319 m->trail = NULL;
2320
2321 /* front */
2322 if (front_len) {
2323 if (front_len > PAGE_CACHE_SIZE) {
2324 m->front.iov_base = __vmalloc(front_len, flags,
2325 PAGE_KERNEL);
2326 m->front_is_vmalloc = true;
2327 } else {
2328 m->front.iov_base = kmalloc(front_len, flags);
2329 }
2330 if (m->front.iov_base == NULL) {
2331 pr_err("msg_new can't allocate %d bytes\n",
2332 front_len);
2333 goto out2;
2334 }
2335 } else {
2336 m->front.iov_base = NULL;
2337 }
2338 m->front.iov_len = front_len;
2339
2340 dout("ceph_msg_new %p front %d\n", m, front_len);
2341 return m;
2342
2343 out2:
2344 ceph_msg_put(m);
2345 out:
2346 pr_err("msg_new can't create type %d front %d\n", type, front_len);
2347 return NULL;
2348 }
2349 EXPORT_SYMBOL(ceph_msg_new);
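/*
 * Example (illustrative sketch): typical callers allocate the front at its
 * maximum size, encode into it, then shrink front.iov_len and
 * hdr.front_len to what was actually used (ceph_con_send() checks that the
 * two agree).  The type, payload and 128-byte bound are placeholders.
 */
static struct ceph_msg *example_build_msg(int type, const void *payload,
					  size_t len)
{
	struct ceph_msg *m;

	if (len > 128)
		return NULL;

	m = ceph_msg_new(type, 128, GFP_NOFS);
	if (!m)
		return NULL;

	memcpy(m->front.iov_base, payload, len);
	m->front.iov_len = len;			/* bytes actually encoded */
	m->hdr.front_len = cpu_to_le32(len);	/* keep the header in sync */
	return m;
}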
2350
2351 /*
2352 * Allocate "middle" portion of a message, if it is needed and wasn't
2353 * allocated by alloc_msg. This allows us to read a small fixed-size
2354 * per-type header in the front and then gracefully fail (i.e.,
2355 * propagate the error to the caller based on info in the front) when
2356 * the middle is too large.
2357 */
2358 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
2359 {
2360 int type = le16_to_cpu(msg->hdr.type);
2361 int middle_len = le32_to_cpu(msg->hdr.middle_len);
2362
2363 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
2364 ceph_msg_type_name(type), middle_len);
2365 BUG_ON(!middle_len);
2366 BUG_ON(msg->middle);
2367
2368 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
2369 if (!msg->middle)
2370 return -ENOMEM;
2371 return 0;
2372 }
2373
2374 /*
2375 * Generic message allocator, for incoming messages.
2376 */
2377 static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
2378 struct ceph_msg_header *hdr,
2379 int *skip)
2380 {
2381 int type = le16_to_cpu(hdr->type);
2382 int front_len = le32_to_cpu(hdr->front_len);
2383 int middle_len = le32_to_cpu(hdr->middle_len);
2384 struct ceph_msg *msg = NULL;
2385 int ret;
2386
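/*
 * Prefer the connection's alloc_msg hook.  con->mutex is dropped across
 * the callback, which may block or take the caller's own locks; if the
 * callback sets *skip, the incoming message is dropped instead.
 */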
2387 if (con->ops->alloc_msg) {
2388 mutex_unlock(&con->mutex);
2389 msg = con->ops->alloc_msg(con, hdr, skip);
2390 mutex_lock(&con->mutex);
2391 if (!msg || *skip)
2392 return NULL;
2393 }
2394 if (!msg) {
2395 *skip = 0;
2396 msg = ceph_msg_new(type, front_len, GFP_NOFS);
2397 if (!msg) {
2398 pr_err("unable to allocate msg type %d len %d\n",
2399 type, front_len);
2400 return NULL;
2401 }
2402 msg->page_alignment = le16_to_cpu(hdr->data_off);
2403 }
2404 memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
2405
2406 if (middle_len && !msg->middle) {
2407 ret = ceph_alloc_middle(con, msg);
2408 if (ret < 0) {
2409 ceph_msg_put(msg);
2410 return NULL;
2411 }
2412 }
2413
2414 return msg;
2415 }
2416
2417
2418 /*
2419 * Free a generically kmalloc'd message.
2420 */
2421 void ceph_msg_kfree(struct ceph_msg *m)
2422 {
2423 dout("msg_kfree %p\n", m);
2424 if (m->front_is_vmalloc)
2425 vfree(m->front.iov_base);
2426 else
2427 kfree(m->front.iov_base);
2428 kfree(m);
2429 }
2430
2431 /*
2432 * Drop a msg ref. Destroy as needed.
2433 */
2434 void ceph_msg_last_put(struct kref *kref)
2435 {
2436 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
2437
2438 dout("ceph_msg_put last one on %p\n", m);
2439 WARN_ON(!list_empty(&m->list_head));
2440
2441 /* drop middle, data, if any */
2442 if (m->middle) {
2443 ceph_buffer_put(m->middle);
2444 m->middle = NULL;
2445 }
2446 m->nr_pages = 0;
2447 m->pages = NULL;
2448
2449 if (m->pagelist) {
2450 ceph_pagelist_release(m->pagelist);
2451 kfree(m->pagelist);
2452 m->pagelist = NULL;
2453 }
2454
2455 m->trail = NULL;
2456
2457 if (m->pool)
2458 ceph_msgpool_put(m->pool, m);
2459 else
2460 ceph_msg_kfree(m);
2461 }
2462 EXPORT_SYMBOL(ceph_msg_last_put);
2463
2464 void ceph_msg_dump(struct ceph_msg *msg)
2465 {
2466 pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
2467 msg->front_max, msg->nr_pages);
2468 print_hex_dump(KERN_DEBUG, "header: ",
2469 DUMP_PREFIX_OFFSET, 16, 1,
2470 &msg->hdr, sizeof(msg->hdr), true);
2471 print_hex_dump(KERN_DEBUG, " front: ",
2472 DUMP_PREFIX_OFFSET, 16, 1,
2473 msg->front.iov_base, msg->front.iov_len, true);
2474 if (msg->middle)
2475 print_hex_dump(KERN_DEBUG, "middle: ",
2476 DUMP_PREFIX_OFFSET, 16, 1,
2477 msg->middle->vec.iov_base,
2478 msg->middle->vec.iov_len, true);
2479 print_hex_dump(KERN_DEBUG, "footer: ",
2480 DUMP_PREFIX_OFFSET, 16, 1,
2481 &msg->footer, sizeof(msg->footer), true);
2482 }
2483 EXPORT_SYMBOL(ceph_msg_dump);