Linux-2.6.12-rc2
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / ipv4 / ipvs / ip_vs_app.c
1 /*
2 * ip_vs_app.c: Application module support for IPVS
3 *
4 * Version: $Id: ip_vs_app.c,v 1.17 2003/03/22 06:31:21 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
14 * is that ip_vs_app module handles the reverse direction (incoming requests
15 * and outgoing responses).
16 *
17 * IP_MASQ_APP application masquerading module
18 *
19 * Author: Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
20 *
21 */
22
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/skbuff.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <net/protocol.h>
29 #include <asm/system.h>
30 #include <linux/stat.h>
31 #include <linux/proc_fs.h>
32 #include <linux/seq_file.h>
33
34 #include <net/ip_vs.h>
35
36 EXPORT_SYMBOL(register_ip_vs_app);
37 EXPORT_SYMBOL(unregister_ip_vs_app);
38 EXPORT_SYMBOL(register_ip_vs_app_inc);
39
40 /* ipvs application list head */
41 static LIST_HEAD(ip_vs_app_list);
42 static DECLARE_MUTEX(__ip_vs_app_mutex);
43
44
45 /*
46 * Get an ip_vs_app object
47 */
48 static inline int ip_vs_app_get(struct ip_vs_app *app)
49 {
50 /* test and get the module atomically */
51 if (app->module)
52 return try_module_get(app->module);
53 else
54 return 1;
55 }
56
57
58 static inline void ip_vs_app_put(struct ip_vs_app *app)
59 {
60 if (app->module)
61 module_put(app->module);
62 }
63
64
65 /*
66 * Allocate/initialize app incarnation and register it in proto apps.
67 */
68 static int
69 ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
70 {
71 struct ip_vs_protocol *pp;
72 struct ip_vs_app *inc;
73 int ret;
74
75 if (!(pp = ip_vs_proto_get(proto)))
76 return -EPROTONOSUPPORT;
77
78 if (!pp->unregister_app)
79 return -EOPNOTSUPP;
80
81 inc = kmalloc(sizeof(struct ip_vs_app), GFP_KERNEL);
82 if (!inc)
83 return -ENOMEM;
84 memcpy(inc, app, sizeof(*inc));
85 INIT_LIST_HEAD(&inc->p_list);
86 INIT_LIST_HEAD(&inc->incs_list);
87 inc->app = app;
88 inc->port = htons(port);
89 atomic_set(&inc->usecnt, 0);
90
91 if (app->timeouts) {
92 inc->timeout_table =
93 ip_vs_create_timeout_table(app->timeouts,
94 app->timeouts_size);
95 if (!inc->timeout_table) {
96 ret = -ENOMEM;
97 goto out;
98 }
99 }
100
101 ret = pp->register_app(inc);
102 if (ret)
103 goto out;
104
105 list_add(&inc->a_list, &app->incs_list);
106 IP_VS_DBG(9, "%s application %s:%u registered\n",
107 pp->name, inc->name, inc->port);
108
109 return 0;
110
111 out:
112 if (inc->timeout_table)
113 kfree(inc->timeout_table);
114 kfree(inc);
115 return ret;
116 }
117
118
119 /*
120 * Release app incarnation
121 */
122 static void
123 ip_vs_app_inc_release(struct ip_vs_app *inc)
124 {
125 struct ip_vs_protocol *pp;
126
127 if (!(pp = ip_vs_proto_get(inc->protocol)))
128 return;
129
130 if (pp->unregister_app)
131 pp->unregister_app(inc);
132
133 IP_VS_DBG(9, "%s App %s:%u unregistered\n",
134 pp->name, inc->name, inc->port);
135
136 list_del(&inc->a_list);
137
138 if (inc->timeout_table != NULL)
139 kfree(inc->timeout_table);
140 kfree(inc);
141 }
142
143
144 /*
145 * Get reference to app inc (only called from softirq)
146 *
147 */
148 int ip_vs_app_inc_get(struct ip_vs_app *inc)
149 {
150 int result;
151
152 atomic_inc(&inc->usecnt);
153 if (unlikely((result = ip_vs_app_get(inc->app)) != 1))
154 atomic_dec(&inc->usecnt);
155 return result;
156 }
157
158
/*
 *	Put the app inc (only called from timer or net softirq).
 *	Mirrors ip_vs_app_inc_get(): drops the module reference, then the
 *	incarnation use count.
 */
void ip_vs_app_inc_put(struct ip_vs_app *inc)
{
	ip_vs_app_put(inc->app);
	atomic_dec(&inc->usecnt);
}
167
168
169 /*
170 * Register an application incarnation in protocol applications
171 */
172 int
173 register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
174 {
175 int result;
176
177 down(&__ip_vs_app_mutex);
178
179 result = ip_vs_app_inc_new(app, proto, port);
180
181 up(&__ip_vs_app_mutex);
182
183 return result;
184 }
185
186
187 /*
188 * ip_vs_app registration routine
189 */
190 int register_ip_vs_app(struct ip_vs_app *app)
191 {
192 /* increase the module use count */
193 ip_vs_use_count_inc();
194
195 down(&__ip_vs_app_mutex);
196
197 list_add(&app->a_list, &ip_vs_app_list);
198
199 up(&__ip_vs_app_mutex);
200
201 return 0;
202 }
203
204
205 /*
206 * ip_vs_app unregistration routine
207 * We are sure there are no app incarnations attached to services
208 */
209 void unregister_ip_vs_app(struct ip_vs_app *app)
210 {
211 struct ip_vs_app *inc, *nxt;
212
213 down(&__ip_vs_app_mutex);
214
215 list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
216 ip_vs_app_inc_release(inc);
217 }
218
219 list_del(&app->a_list);
220
221 up(&__ip_vs_app_mutex);
222
223 /* decrease the module use count */
224 ip_vs_use_count_dec();
225 }
226
227
#if 0000
/*
 *	Get reference to app by name (called from user context).
 *	NOTE: this block is compiled out; the loop variable was `ent`
 *	while the body tested `app`, which would not even compile if
 *	enabled — fixed to iterate with `app` throughout.
 */
struct ip_vs_app *ip_vs_app_get_by_name(char *appname)
{
	struct ip_vs_app *app, *a = NULL;

	down(&__ip_vs_app_mutex);

	list_for_each_entry(app, &ip_vs_app_list, a_list) {
		if (strcmp(app->name, appname))
			continue;

		/* softirq may call ip_vs_app_get too, so the caller
		   must disable softirq on the current CPU */
		if (ip_vs_app_get(app))
			a = app;
		break;
	}

	up(&__ip_vs_app_mutex);

	return a;
}
#endif
254
255
/*
 *	Bind ip_vs_conn to its ip_vs_app (called by cp constructor).
 *	Thin wrapper: delegates entirely to the protocol's app_conn_bind
 *	handler and returns whatever it reports.
 */
int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
{
	return pp->app_conn_bind(cp);
}
263
264
/*
 *	Unbind cp from application incarnation (called by cp destructor).
 *	Invokes the incarnation's optional unbind_conn and done_conn hooks
 *	(in that order), drops the reference taken at bind time and clears
 *	cp->app.  No-op if no app is bound.
 */
void ip_vs_unbind_app(struct ip_vs_conn *cp)
{
	struct ip_vs_app *inc = cp->app;

	if (!inc)
		return;

	if (inc->unbind_conn)
		inc->unbind_conn(inc, cp);
	if (inc->done_conn)
		inc->done_conn(inc, cp);
	ip_vs_app_inc_put(inc);
	cp->app = NULL;
}
282
283
284 /*
285 * Fixes th->seq based on ip_vs_seq info.
286 */
287 static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
288 {
289 __u32 seq = ntohl(th->seq);
290
291 /*
292 * Adjust seq with delta-offset for all packets after
293 * the most recent resized pkt seq and with previous_delta offset
294 * for all packets before most recent resized pkt seq.
295 */
296 if (vseq->delta || vseq->previous_delta) {
297 if(after(seq, vseq->init_seq)) {
298 th->seq = htonl(seq + vseq->delta);
299 IP_VS_DBG(9, "vs_fix_seq(): added delta (%d) to seq\n",
300 vseq->delta);
301 } else {
302 th->seq = htonl(seq + vseq->previous_delta);
303 IP_VS_DBG(9, "vs_fix_seq(): added previous_delta "
304 "(%d) to seq\n", vseq->previous_delta);
305 }
306 }
307 }
308
309
310 /*
311 * Fixes th->ack_seq based on ip_vs_seq info.
312 */
313 static inline void
314 vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
315 {
316 __u32 ack_seq = ntohl(th->ack_seq);
317
318 /*
319 * Adjust ack_seq with delta-offset for
320 * the packets AFTER most recent resized pkt has caused a shift
321 * for packets before most recent resized pkt, use previous_delta
322 */
323 if (vseq->delta || vseq->previous_delta) {
324 /* since ack_seq is the number of octet that is expected
325 to receive next, so compare it with init_seq+delta */
326 if(after(ack_seq, vseq->init_seq+vseq->delta)) {
327 th->ack_seq = htonl(ack_seq - vseq->delta);
328 IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted delta "
329 "(%d) from ack_seq\n", vseq->delta);
330
331 } else {
332 th->ack_seq = htonl(ack_seq - vseq->previous_delta);
333 IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted "
334 "previous_delta (%d) from ack_seq\n",
335 vseq->previous_delta);
336 }
337 }
338 }
339
340
/*
 *	Updates ip_vs_seq if pkt has been resized.
 *	Assumes already checked proto==IPPROTO_TCP and diff!=0.
 *
 *	Rolls delta into previous_delta, accumulates the new diff and
 *	records the seq of the resized packet, then marks the connection
 *	with `flag` so the fix-up helpers above start adjusting.
 */
static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
				 unsigned flag, __u32 seq, int diff)
{
	/* spinlock is to keep updating cp->flags atomic */
	spin_lock(&cp->lock);
	/* only update for the first resize, or for a resize at a newer seq */
	if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
		vseq->previous_delta = vseq->delta;
		vseq->delta += diff;
		vseq->init_seq = seq;
		cp->flags |= flag;
	}
	spin_unlock(&cp->lock);
}
358
/*
 *	Apply the bound application's output hook to a TCP packet and keep
 *	the connection's sequence-number bookkeeping consistent.
 *	Returns 0 if the packet could not be handled (skb not made
 *	writable, or the app hook failed), nonzero otherwise.
 */
static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb,
				  struct ip_vs_app *app)
{
	int diff;
	unsigned int tcp_offset = (*pskb)->nh.iph->ihl*4;
	struct tcphdr *th;
	__u32 seq;

	/* we are about to rewrite the TCP header in place */
	if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th)))
		return 0;

	th = (struct tcphdr *)((*pskb)->nh.raw + tcp_offset);

	/*
	 *	Remember seq number in case this pkt gets resized
	 */
	seq = ntohl(th->seq);

	/*
	 *	Fix seq stuff if flagged as so.
	 */
	if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
		vs_fix_seq(&cp->out_seq, th);
	if (cp->flags & IP_VS_CONN_F_IN_SEQ)
		vs_fix_ack_seq(&cp->in_seq, th);

	/*
	 *	Call private output hook function
	 */
	if (app->pkt_out == NULL)
		return 1;

	if (!app->pkt_out(app, cp, pskb, &diff))
		return 0;

	/*
	 *	Update ip_vs seq stuff if len has changed.
	 */
	if (diff != 0)
		vs_seq_update(cp, &cp->out_seq,
			      IP_VS_CONN_F_OUT_SEQ, seq, diff);

	return 1;
}
403
404 /*
405 * Output pkt hook. Will call bound ip_vs_app specific function
406 * called by ipvs packet handler, assumes previously checked cp!=NULL
407 * returns false if it can't handle packet (oom)
408 */
409 int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb)
410 {
411 struct ip_vs_app *app;
412
413 /*
414 * check if application module is bound to
415 * this ip_vs_conn.
416 */
417 if ((app = cp->app) == NULL)
418 return 1;
419
420 /* TCP is complicated */
421 if (cp->protocol == IPPROTO_TCP)
422 return app_tcp_pkt_out(cp, pskb, app);
423
424 /*
425 * Call private output hook function
426 */
427 if (app->pkt_out == NULL)
428 return 1;
429
430 return app->pkt_out(app, cp, pskb, NULL);
431 }
432
433
/*
 *	Apply the bound application's input hook to a TCP packet and keep
 *	the connection's sequence-number bookkeeping consistent (mirror of
 *	app_tcp_pkt_out with in/out roles swapped).
 *	Returns 0 if the packet could not be handled, nonzero otherwise.
 */
static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb,
				 struct ip_vs_app *app)
{
	int diff;
	unsigned int tcp_offset = (*pskb)->nh.iph->ihl*4;
	struct tcphdr *th;
	__u32 seq;

	/* we are about to rewrite the TCP header in place */
	if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th)))
		return 0;

	th = (struct tcphdr *)((*pskb)->nh.raw + tcp_offset);

	/*
	 *	Remember seq number in case this pkt gets resized
	 */
	seq = ntohl(th->seq);

	/*
	 *	Fix seq stuff if flagged as so.
	 */
	if (cp->flags & IP_VS_CONN_F_IN_SEQ)
		vs_fix_seq(&cp->in_seq, th);
	if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
		vs_fix_ack_seq(&cp->out_seq, th);

	/*
	 *	Call private input hook function
	 */
	if (app->pkt_in == NULL)
		return 1;

	if (!app->pkt_in(app, cp, pskb, &diff))
		return 0;

	/*
	 *	Update ip_vs seq stuff if len has changed.
	 */
	if (diff != 0)
		vs_seq_update(cp, &cp->in_seq,
			      IP_VS_CONN_F_IN_SEQ, seq, diff);

	return 1;
}
478
479 /*
480 * Input pkt hook. Will call bound ip_vs_app specific function
481 * called by ipvs packet handler, assumes previously checked cp!=NULL.
482 * returns false if can't handle packet (oom).
483 */
484 int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb)
485 {
486 struct ip_vs_app *app;
487
488 /*
489 * check if application module is bound to
490 * this ip_vs_conn.
491 */
492 if ((app = cp->app) == NULL)
493 return 1;
494
495 /* TCP is complicated */
496 if (cp->protocol == IPPROTO_TCP)
497 return app_tcp_pkt_in(cp, pskb, app);
498
499 /*
500 * Call private input hook function
501 */
502 if (app->pkt_in == NULL)
503 return 1;
504
505 return app->pkt_in(app, cp, pskb, NULL);
506 }
507
508
509 #ifdef CONFIG_PROC_FS
510 /*
511 * /proc/net/ip_vs_app entry function
512 */
513
514 static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
515 {
516 struct ip_vs_app *app, *inc;
517
518 list_for_each_entry(app, &ip_vs_app_list, a_list) {
519 list_for_each_entry(inc, &app->incs_list, a_list) {
520 if (pos-- == 0)
521 return inc;
522 }
523 }
524 return NULL;
525
526 }
527
528 static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
529 {
530 down(&__ip_vs_app_mutex);
531
532 return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
533 }
534
/*
 *	seq_file next: advance to the next incarnation, crossing over to
 *	the next application's incs_list when the current one is
 *	exhausted.  Returns NULL at the end of all applications.
 */
static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_vs_app *inc, *app;
	struct list_head *e;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip_vs_app_idx(0);

	inc = v;
	app = inc->app;

	/* more incarnations left in the current application? */
	if ((e = inc->a_list.next) != &app->incs_list)
		return list_entry(e, struct ip_vs_app, a_list);

	/* go on to next application */
	for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
		app = list_entry(e, struct ip_vs_app, a_list);
		/* return the first incarnation of this app, if any;
		   otherwise the outer loop tries the next app */
		list_for_each_entry(inc, &app->incs_list, a_list) {
			return inc;
		}
	}
	return NULL;
}
559
/*
 *	seq_file stop: release the mutex taken in ip_vs_app_seq_start().
 */
static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
	up(&__ip_vs_app_mutex);
}
564
565 static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
566 {
567 if (v == SEQ_START_TOKEN)
568 seq_puts(seq, "prot port usecnt name\n");
569 else {
570 const struct ip_vs_app *inc = v;
571
572 seq_printf(seq, "%-3s %-7u %-6d %-17s\n",
573 ip_vs_proto_name(inc->protocol),
574 ntohs(inc->port),
575 atomic_read(&inc->usecnt),
576 inc->name);
577 }
578 return 0;
579 }
580
/* seq_file iterator callbacks for /proc/net/ip_vs_app */
static struct seq_operations ip_vs_app_seq_ops = {
	.start = ip_vs_app_seq_start,
	.next  = ip_vs_app_seq_next,
	.stop  = ip_vs_app_seq_stop,
	.show  = ip_vs_app_seq_show,
};
587
/* open handler for /proc/net/ip_vs_app: attach the seq_file iterator */
static int ip_vs_app_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ip_vs_app_seq_ops);
}
592
/* file_operations for /proc/net/ip_vs_app (standard seq_file plumbing) */
static struct file_operations ip_vs_app_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip_vs_app_open,
	.read	 = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
600 #endif
601
602
/*
 *	Replace a segment of data with a new segment.
 *
 *	@skb:   packet whose payload is edited in place
 *	@pri:   allocation priority (GFP flags) for pskb_expand_head()
 *	@o_buf: start of the old segment inside skb->data
 *	@o_len: length of the old segment
 *	@n_buf: replacement data
 *	@n_len: length of the replacement
 *
 *	Returns 0 on success, -ENOMEM if the skb head could not be grown.
 *	Also fixes up the IP header total length.
 *	NOTE(review): iph->tot_len is set without re-checksumming here;
 *	presumably the caller recomputes the IP checksum — confirm.
 */
int ip_vs_skb_replace(struct sk_buff *skb, int pri,
		      char *o_buf, int o_len, char *n_buf, int n_len)
{
	struct iphdr *iph;
	int diff;
	int o_offset;
	int o_left;

	EnterFunction(9);

	diff = n_len - o_len;
	o_offset = o_buf - (char *)skb->data;
	/* The length of left data after o_buf+o_len in the skb data */
	o_left = skb->len - (o_offset + o_len);

	if (diff <= 0) {
		/* shrinking (or same size): shift the tail left, then trim */
		memmove(o_buf + n_len, o_buf + o_len, o_left);
		memcpy(o_buf, n_buf, n_len);
		skb_trim(skb, skb->len + diff);
	} else if (diff <= skb_tailroom(skb)) {
		/* growing, but the extra bytes fit in the existing tailroom */
		skb_put(skb, diff);
		memmove(o_buf + n_len, o_buf + o_len, o_left);
		memcpy(o_buf, n_buf, n_len);
	} else {
		/* growing beyond tailroom: reallocate the skb head; note
		   o_buf is stale after this, so use skb->data + o_offset */
		if (pskb_expand_head(skb, skb_headroom(skb), diff, pri))
			return -ENOMEM;
		skb_put(skb, diff);
		memmove(skb->data + o_offset + n_len,
			skb->data + o_offset + o_len, o_left);
		memcpy(skb->data + o_offset, n_buf, n_len);
	}

	/* must update the iph total length here */
	iph = skb->nh.iph;
	iph->tot_len = htons(skb->len);

	LeaveFunction(9);
	return 0;
}
645
646
/*
 *	Module init: create the /proc/net/ip_vs_app entry.
 *	NOTE(review): ip_vs_app_fops is only defined under
 *	CONFIG_PROC_FS, while this call is unconditional — presumably
 *	proc_net_fops_create degrades gracefully without procfs; verify.
 */
int ip_vs_app_init(void)
{
	/* we will replace it with proc_net_ipvs_create() soon */
	proc_net_fops_create("ip_vs_app", 0, &ip_vs_app_fops);
	return 0;
}
653
654
/*
 *	Module cleanup: remove the /proc/net/ip_vs_app entry created by
 *	ip_vs_app_init().
 */
void ip_vs_app_cleanup(void)
{
	proc_net_remove("ip_vs_app");
}