1 /*
2 * IP Virtual Server
3 * data structure and functionality definitions
4 */
5
6 #ifndef _NET_IP_VS_H
7 #define _NET_IP_VS_H
8
9 #include <linux/ip_vs.h> /* definitions shared with userland */
10
11 #include <asm/types.h> /* for __uXX types */
12
13 #include <linux/list.h> /* for struct list_head */
14 #include <linux/spinlock.h> /* for rwlock_t */
15 #include <linux/atomic.h> /* for atomic_t */
16 #include <linux/compiler.h>
17 #include <linux/timer.h>
18 #include <linux/bug.h>
19
20 #include <net/checksum.h>
21 #include <linux/netfilter.h> /* for union nf_inet_addr */
22 #include <linux/ip.h>
23 #include <linux/ipv6.h> /* for struct ipv6hdr */
24 #include <net/ipv6.h>
25 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
26 #include <net/netfilter/nf_conntrack.h>
27 #endif
28 #include <net/net_namespace.h> /* Network namespace */
29
30 /*
31 * Generic access to the ipvs struct
32 */
33 static inline struct netns_ipvs *net_ipvs(struct net *net)
34 {
35 return net->ipvs;
36 }
37 /*
38 * Get the net pointer from the skb in the traffic (packet) path;
39 * use skb_sknet() when the call comes from userland (ioctl or netlink).
40 */
41 static inline struct net *skb_net(const struct sk_buff *skb)
42 {
43 #ifdef CONFIG_NET_NS
44 #ifdef CONFIG_IP_VS_DEBUG
45 /*
46 * This is used for debug only.
47 * Start with the most likely hit
48 * End with BUG
49 */
50 if (likely(skb->dev && skb->dev->nd_net))
51 return dev_net(skb->dev);
52 if (skb_dst(skb) && skb_dst(skb)->dev)
53 return dev_net(skb_dst(skb)->dev);
54 WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
55 __func__, __LINE__);
56 if (likely(skb->sk && skb->sk->sk_net))
57 return sock_net(skb->sk);
58 pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
59 __func__, __LINE__);
60 BUG();
61 #else
62 return dev_net(skb->dev ? : skb_dst(skb)->dev);
63 #endif
64 #else
65 return &init_net;
66 #endif
67 }
68
69 static inline struct net *skb_sknet(const struct sk_buff *skb)
70 {
71 #ifdef CONFIG_NET_NS
72 #ifdef CONFIG_IP_VS_DEBUG
73 /* Start with the most likely hit */
74 if (likely(skb->sk && skb->sk->sk_net))
75 return sock_net(skb->sk);
76 WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
77 __func__, __LINE__);
78 if (likely(skb->dev && skb->dev->nd_net))
79 return dev_net(skb->dev);
80 pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
81 __func__, __LINE__);
82 BUG();
83 #else
84 return sock_net(skb->sk);
85 #endif
86 #else
87 return &init_net;
88 #endif
89 }
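/*
 * Illustrative sketch (not part of this header): choosing between the two
 * helpers above.  In packet-path hooks the skb came in on a device, so
 * skb_net() applies; in ioctl/netlink handlers it came from a socket, so
 * skb_sknet() must be used instead:
 *
 *	struct netns_ipvs *ipvs;
 *
 *	ipvs = net_ipvs(skb_net(skb));		// traffic path
 *	...
 *	ipvs = net_ipvs(skb_sknet(skb));	// userland (ioctl/netlink) request
 */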
90 /*
91 * This one is needed for single_open_net() since the net pointer is stored
92 * directly in seq->private, not inside a struct, i.e. seq_file_net() can't be used.
93 */
94 static inline struct net *seq_file_single_net(struct seq_file *seq)
95 {
96 #ifdef CONFIG_NET_NS
97 return (struct net *)seq->private;
98 #else
99 return &init_net;
100 #endif
101 }
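/*
 * Illustrative sketch (hypothetical show handler): a seq_file opened with
 * single_open_net() stores the net pointer directly in seq->private, so it
 * must be read back with seq_file_single_net(), not seq_file_net():
 *
 *	static int example_show(struct seq_file *seq, void *v)
 *	{
 *		struct net *net = seq_file_single_net(seq);
 *
 *		seq_printf(seq, "conns %d\n",
 *			   atomic_read(&net_ipvs(net)->conn_count));
 *		return 0;
 *	}
 */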
102
103 /* Connection hash table size, needed by ip_vs_ctl.c */
104 extern int ip_vs_conn_tab_size;
105
106
107 struct ip_vs_iphdr {
108 int len;
109 __u8 protocol;
110 union nf_inet_addr saddr;
111 union nf_inet_addr daddr;
112 };
113
114 static inline void
115 ip_vs_fill_iphdr(int af, const void *nh, struct ip_vs_iphdr *iphdr)
116 {
117 #ifdef CONFIG_IP_VS_IPV6
118 if (af == AF_INET6) {
119 const struct ipv6hdr *iph = nh;
120 iphdr->len = sizeof(struct ipv6hdr);
121 iphdr->protocol = iph->nexthdr;
122 iphdr->saddr.in6 = iph->saddr;
123 iphdr->daddr.in6 = iph->daddr;
124 } else
125 #endif
126 {
127 const struct iphdr *iph = nh;
128 iphdr->len = iph->ihl * 4;
129 iphdr->protocol = iph->protocol;
130 iphdr->saddr.ip = iph->saddr;
131 iphdr->daddr.ip = iph->daddr;
132 }
133 }
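/*
 * Illustrative sketch (hypothetical caller): filling an ip_vs_iphdr from
 * the network header of an skb, for either address family:
 *
 *	struct ip_vs_iphdr iph;
 *
 *	ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 *	// iph.len is the L3 header length, iph.protocol the L4 protocol,
 *	// iph.saddr/iph.daddr hold the addresses for the given family.
 */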
134
135 static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
136 const union nf_inet_addr *src)
137 {
138 #ifdef CONFIG_IP_VS_IPV6
139 if (af == AF_INET6)
140 dst->in6 = src->in6;
141 else
142 #endif
143 dst->ip = src->ip;
144 }
145
146 static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
147 const union nf_inet_addr *b)
148 {
149 #ifdef CONFIG_IP_VS_IPV6
150 if (af == AF_INET6)
151 return ipv6_addr_equal(&a->in6, &b->in6);
152 #endif
153 return a->ip == b->ip;
154 }
155
156 #ifdef CONFIG_IP_VS_DEBUG
157 #include <linux/net.h>
158
159 extern int ip_vs_get_debug_level(void);
160
161 static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
162 const union nf_inet_addr *addr,
163 int *idx)
164 {
165 int len;
166 #ifdef CONFIG_IP_VS_IPV6
167 if (af == AF_INET6)
168 len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6]",
169 &addr->in6) + 1;
170 else
171 #endif
172 len = snprintf(&buf[*idx], buf_len - *idx, "%pI4",
173 &addr->ip) + 1;
174
175 *idx += len;
176 BUG_ON(*idx > buf_len + 1);
177 return &buf[*idx - len];
178 }
179
180 #define IP_VS_DBG_BUF(level, msg, ...) \
181 do { \
182 char ip_vs_dbg_buf[160]; \
183 int ip_vs_dbg_idx = 0; \
184 if (level <= ip_vs_get_debug_level()) \
185 printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \
186 } while (0)
187 #define IP_VS_ERR_BUF(msg...) \
188 do { \
189 char ip_vs_dbg_buf[160]; \
190 int ip_vs_dbg_idx = 0; \
191 pr_err(msg); \
192 } while (0)
193
194 /* Only use from within IP_VS_DBG_BUF() or IP_VS_ERR_BUF macros */
195 #define IP_VS_DBG_ADDR(af, addr) \
196 ip_vs_dbg_addr(af, ip_vs_dbg_buf, \
197 sizeof(ip_vs_dbg_buf), addr, \
198 &ip_vs_dbg_idx)
199
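/*
 * Illustrative sketch: IP_VS_DBG_ADDR() (above) expands to code that uses the
 * local ip_vs_dbg_buf/ip_vs_dbg_idx variables declared by IP_VS_DBG_BUF() and
 * IP_VS_ERR_BUF(), which is why it may only appear inside those macros, e.g.:
 *
 *	IP_VS_DBG_BUF(6, "packet for %s:%d\n",
 *		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport));
 */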
200 #define IP_VS_DBG(level, msg, ...) \
201 do { \
202 if (level <= ip_vs_get_debug_level()) \
203 printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \
204 } while (0)
205 #define IP_VS_DBG_RL(msg, ...) \
206 do { \
207 if (net_ratelimit()) \
208 printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \
209 } while (0)
210 #define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg) \
211 do { \
212 if (level <= ip_vs_get_debug_level()) \
213 pp->debug_packet(af, pp, skb, ofs, msg); \
214 } while (0)
215 #define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg) \
216 do { \
217 if (level <= ip_vs_get_debug_level() && \
218 net_ratelimit()) \
219 pp->debug_packet(af, pp, skb, ofs, msg); \
220 } while (0)
221 #else /* NO DEBUGGING at ALL */
222 #define IP_VS_DBG_BUF(level, msg...) do {} while (0)
223 #define IP_VS_ERR_BUF(msg...) do {} while (0)
224 #define IP_VS_DBG(level, msg...) do {} while (0)
225 #define IP_VS_DBG_RL(msg...) do {} while (0)
226 #define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg) do {} while (0)
227 #define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg) do {} while (0)
228 #endif
229
230 #define IP_VS_BUG() BUG()
231 #define IP_VS_ERR_RL(msg, ...) \
232 do { \
233 if (net_ratelimit()) \
234 pr_err(msg, ##__VA_ARGS__); \
235 } while (0)
236
237 #ifdef CONFIG_IP_VS_DEBUG
238 #define EnterFunction(level) \
239 do { \
240 if (level <= ip_vs_get_debug_level()) \
241 printk(KERN_DEBUG \
242 pr_fmt("Enter: %s, %s line %i\n"), \
243 __func__, __FILE__, __LINE__); \
244 } while (0)
245 #define LeaveFunction(level) \
246 do { \
247 if (level <= ip_vs_get_debug_level()) \
248 printk(KERN_DEBUG \
249 pr_fmt("Leave: %s, %s line %i\n"), \
250 __func__, __FILE__, __LINE__); \
251 } while (0)
252 #else
253 #define EnterFunction(level) do {} while (0)
254 #define LeaveFunction(level) do {} while (0)
255 #endif
256
257 #define IP_VS_WAIT_WHILE(expr) while (expr) { cpu_relax(); }
258
259
260 /*
261 * The port number of FTP service (in network order).
262 */
263 #define FTPPORT cpu_to_be16(21)
264 #define FTPDATA cpu_to_be16(20)
265
266 /*
267 * TCP State Values
268 */
269 enum {
270 IP_VS_TCP_S_NONE = 0,
271 IP_VS_TCP_S_ESTABLISHED,
272 IP_VS_TCP_S_SYN_SENT,
273 IP_VS_TCP_S_SYN_RECV,
274 IP_VS_TCP_S_FIN_WAIT,
275 IP_VS_TCP_S_TIME_WAIT,
276 IP_VS_TCP_S_CLOSE,
277 IP_VS_TCP_S_CLOSE_WAIT,
278 IP_VS_TCP_S_LAST_ACK,
279 IP_VS_TCP_S_LISTEN,
280 IP_VS_TCP_S_SYNACK,
281 IP_VS_TCP_S_LAST
282 };
283
284 /*
285 * UDP State Values
286 */
287 enum {
288 IP_VS_UDP_S_NORMAL,
289 IP_VS_UDP_S_LAST,
290 };
291
292 /*
293 * ICMP State Values
294 */
295 enum {
296 IP_VS_ICMP_S_NORMAL,
297 IP_VS_ICMP_S_LAST,
298 };
299
300 /*
301 * SCTP State Values
302 */
303 enum ip_vs_sctp_states {
304 IP_VS_SCTP_S_NONE,
305 IP_VS_SCTP_S_INIT_CLI,
306 IP_VS_SCTP_S_INIT_SER,
307 IP_VS_SCTP_S_INIT_ACK_CLI,
308 IP_VS_SCTP_S_INIT_ACK_SER,
309 IP_VS_SCTP_S_ECHO_CLI,
310 IP_VS_SCTP_S_ECHO_SER,
311 IP_VS_SCTP_S_ESTABLISHED,
312 IP_VS_SCTP_S_SHUT_CLI,
313 IP_VS_SCTP_S_SHUT_SER,
314 IP_VS_SCTP_S_SHUT_ACK_CLI,
315 IP_VS_SCTP_S_SHUT_ACK_SER,
316 IP_VS_SCTP_S_CLOSED,
317 IP_VS_SCTP_S_LAST
318 };
319
320 /*
321 * Delta sequence info structure.
322 * Each ip_vs_conn has two of them (for output and input seq. changes);
323 * they are only used in VS/NAT.
324 */
325 struct ip_vs_seq {
326 __u32 init_seq; /* Add delta from this seq */
327 __u32 delta; /* Delta in sequence numbers */
328 __u32 previous_delta; /* Delta in sequence numbers
329 before last resized pkt */
330 };
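/*
 * Illustrative example (made-up numbers, not the exact ip_vs_app logic):
 * if an application helper grows a payload by 7 bytes at sequence 1000,
 * then init_seq = 1000 and delta = 7, and later sequence numbers at or
 * beyond init_seq are adjusted by +7; previous_delta keeps the old offset
 * for segments that still predate the last resized packet.
 */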
331
332 /*
333 * counters per cpu
334 */
335 struct ip_vs_counters {
336 __u32 conns; /* connections scheduled */
337 __u32 inpkts; /* incoming packets */
338 __u32 outpkts; /* outgoing packets */
339 __u64 inbytes; /* incoming bytes */
340 __u64 outbytes; /* outgoing bytes */
341 };
342 /*
343 * Stats per cpu
344 */
345 struct ip_vs_cpu_stats {
346 struct ip_vs_counters ustats;
347 struct u64_stats_sync syncp;
348 };
349
350 /*
351 * IPVS statistics objects
352 */
353 struct ip_vs_estimator {
354 struct list_head list;
355
356 u64 last_inbytes;
357 u64 last_outbytes;
358 u32 last_conns;
359 u32 last_inpkts;
360 u32 last_outpkts;
361
362 u32 cps;
363 u32 inpps;
364 u32 outpps;
365 u32 inbps;
366 u32 outbps;
367 };
368
369 struct ip_vs_stats {
370 struct ip_vs_stats_user ustats; /* statistics */
371 struct ip_vs_estimator est; /* estimator */
372 struct ip_vs_cpu_stats *cpustats; /* per cpu counters */
373 spinlock_t lock; /* spin lock */
374 struct ip_vs_stats_user ustats0; /* reset values */
375 };
376
377 struct dst_entry;
378 struct iphdr;
379 struct ip_vs_conn;
380 struct ip_vs_app;
381 struct sk_buff;
382 struct ip_vs_proto_data;
383
384 struct ip_vs_protocol {
385 struct ip_vs_protocol *next;
386 char *name;
387 u16 protocol;
388 u16 num_states;
389 int dont_defrag;
390
391 void (*init)(struct ip_vs_protocol *pp);
392
393 void (*exit)(struct ip_vs_protocol *pp);
394
395 int (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
396
397 void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);
398
399 int (*conn_schedule)(int af, struct sk_buff *skb,
400 struct ip_vs_proto_data *pd,
401 int *verdict, struct ip_vs_conn **cpp);
402
403 struct ip_vs_conn *
404 (*conn_in_get)(int af,
405 const struct sk_buff *skb,
406 const struct ip_vs_iphdr *iph,
407 unsigned int proto_off,
408 int inverse);
409
410 struct ip_vs_conn *
411 (*conn_out_get)(int af,
412 const struct sk_buff *skb,
413 const struct ip_vs_iphdr *iph,
414 unsigned int proto_off,
415 int inverse);
416
417 int (*snat_handler)(struct sk_buff *skb,
418 struct ip_vs_protocol *pp, struct ip_vs_conn *cp);
419
420 int (*dnat_handler)(struct sk_buff *skb,
421 struct ip_vs_protocol *pp, struct ip_vs_conn *cp);
422
423 int (*csum_check)(int af, struct sk_buff *skb,
424 struct ip_vs_protocol *pp);
425
426 const char *(*state_name)(int state);
427
428 void (*state_transition)(struct ip_vs_conn *cp, int direction,
429 const struct sk_buff *skb,
430 struct ip_vs_proto_data *pd);
431
432 int (*register_app)(struct net *net, struct ip_vs_app *inc);
433
434 void (*unregister_app)(struct net *net, struct ip_vs_app *inc);
435
436 int (*app_conn_bind)(struct ip_vs_conn *cp);
437
438 void (*debug_packet)(int af, struct ip_vs_protocol *pp,
439 const struct sk_buff *skb,
440 int offset,
441 const char *msg);
442
443 void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
444 };
445
446 /*
447 * protocol data per netns
448 */
449 struct ip_vs_proto_data {
450 struct ip_vs_proto_data *next;
451 struct ip_vs_protocol *pp;
452 int *timeout_table; /* protocol timeout table */
453 atomic_t appcnt; /* counter of proto app incs. */
454 struct tcp_states_t *tcp_state_table;
455 };
456
457 extern struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto);
458 extern struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
459 unsigned short proto);
460
461 struct ip_vs_conn_param {
462 struct net *net;
463 const union nf_inet_addr *caddr;
464 const union nf_inet_addr *vaddr;
465 __be16 cport;
466 __be16 vport;
467 __u16 protocol;
468 u16 af;
469
470 const struct ip_vs_pe *pe;
471 char *pe_data;
472 __u8 pe_data_len;
473 };
474
475 /*
476 * IP_VS structure allocated for each dynamically scheduled connection
477 */
478 struct ip_vs_conn {
479 struct hlist_node c_list; /* hashed list heads */
480 #ifdef CONFIG_NET_NS
481 struct net *net; /* Name space */
482 #endif
483 /* Protocol, addresses and port numbers */
484 u16 af; /* address family */
485 __be16 cport;
486 __be16 vport;
487 __be16 dport;
488 __u32 fwmark; /* Firewall mark from skb */
489 union nf_inet_addr caddr; /* client address */
490 union nf_inet_addr vaddr; /* virtual address */
491 union nf_inet_addr daddr; /* destination address */
492 volatile __u32 flags; /* status flags */
493 __u16 protocol; /* Which protocol (TCP/UDP) */
494
495 /* counter and timer */
496 atomic_t refcnt; /* reference count */
497 struct timer_list timer; /* Expiration timer */
498 volatile unsigned long timeout; /* timeout */
499
500 /* Flags and state transition */
501 spinlock_t lock; /* lock for state transition */
502 volatile __u16 state; /* state info */
503 volatile __u16 old_state; /* old state, to be used for
504 * state-transition-triggered
505 * synchronization
506 */
507 unsigned long sync_endtime; /* jiffies + sent_retries */
508
509 /* Control members */
510 struct ip_vs_conn *control; /* Master control connection */
511 atomic_t n_control; /* Number of controlled ones */
512 struct ip_vs_dest *dest; /* real server */
513 atomic_t in_pkts; /* incoming packet counter */
514
515 /* packet transmitter for different forwarding methods. If it
516 mangles the packet, it must return NF_DROP or better NF_STOLEN,
517 otherwise this must be changed to a sk_buff **.
518 NF_ACCEPT can be returned when destination is local.
519 */
520 int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
521 struct ip_vs_protocol *pp);
522
523 /* Note: the following members could be grouped into a structure
524 to save more space; they are only used in VS/NAT anyway */
526 struct ip_vs_app *app; /* bound ip_vs_app object */
527 void *app_data; /* Application private data */
528 struct ip_vs_seq in_seq; /* incoming seq. struct */
529 struct ip_vs_seq out_seq; /* outgoing seq. struct */
530
531 const struct ip_vs_pe *pe;
532 char *pe_data;
533 __u8 pe_data_len;
534 };
535
536 /*
537 * To save some memory in the conn table when namespace support is disabled.
538 */
539 static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
540 {
541 #ifdef CONFIG_NET_NS
542 return cp->net;
543 #else
544 return &init_net;
545 #endif
546 }
547 static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
548 {
549 #ifdef CONFIG_NET_NS
550 cp->net = net;
551 #endif
552 }
553
554 static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
555 struct net *net)
556 {
557 #ifdef CONFIG_NET_NS
558 return cp->net == net;
559 #else
560 return 1;
561 #endif
562 }
563
564 /*
565 * Extended internal versions of struct ip_vs_service_user and
566 * ip_vs_dest_user for IPv6 support.
567 *
568 * We need these to conveniently pass around service and destination
569 * options, but unfortunately, we also need to keep the old definitions to
570 * maintain userspace backwards compatibility for the setsockopt interface.
571 */
572 struct ip_vs_service_user_kern {
573 /* virtual service addresses */
574 u16 af;
575 u16 protocol;
576 union nf_inet_addr addr; /* virtual ip address */
577 u16 port;
578 u32 fwmark; /* firewall mark of service */
579
580 /* virtual service options */
581 char *sched_name;
582 char *pe_name;
583 unsigned int flags; /* virtual service flags */
584 unsigned int timeout; /* persistent timeout in sec */
585 u32 netmask; /* persistent netmask */
586 };
587
588
589 struct ip_vs_dest_user_kern {
590 /* destination server address */
591 union nf_inet_addr addr;
592 u16 port;
593
594 /* real server options */
595 unsigned int conn_flags; /* connection flags */
596 int weight; /* destination weight */
597
598 /* thresholds for active connections */
599 u32 u_threshold; /* upper threshold */
600 u32 l_threshold; /* lower threshold */
601 };
602
603
604 /*
605 * The information about the virtual service offered to the net
606 * and the forwarding entries
607 */
608 struct ip_vs_service {
609 struct list_head s_list; /* for normal service table */
610 struct list_head f_list; /* for fwmark-based service table */
611 atomic_t refcnt; /* reference counter */
612 atomic_t usecnt; /* use counter */
613
614 u16 af; /* address family */
615 __u16 protocol; /* which protocol (TCP/UDP) */
616 union nf_inet_addr addr; /* IP address for virtual service */
617 __be16 port; /* port number for the service */
618 __u32 fwmark; /* firewall mark of the service */
619 unsigned int flags; /* service status flags */
620 unsigned int timeout; /* persistent timeout in ticks */
621 __be32 netmask; /* grouping granularity */
622 struct net *net;
623
624 struct list_head destinations; /* real server d-linked list */
625 __u32 num_dests; /* number of servers */
626 struct ip_vs_stats stats; /* statistics for the service */
627 struct ip_vs_app *inc; /* bind conns to this app inc */
628
629 /* for scheduling */
630 struct ip_vs_scheduler *scheduler; /* bound scheduler object */
631 rwlock_t sched_lock; /* lock sched_data */
632 void *sched_data; /* scheduler application data */
633
634 /* alternate persistence engine */
635 struct ip_vs_pe *pe;
636 };
637
638
639 /*
640 * The real server destination forwarding entry
641 * with ip address, port number, and so on.
642 */
643 struct ip_vs_dest {
644 struct list_head n_list; /* for the dests in the service */
645 struct list_head d_list; /* for table with all the dests */
646
647 u16 af; /* address family */
648 __be16 port; /* port number of the server */
649 union nf_inet_addr addr; /* IP address of the server */
650 volatile unsigned int flags; /* dest status flags */
651 atomic_t conn_flags; /* flags to copy to conn */
652 atomic_t weight; /* server weight */
653
654 atomic_t refcnt; /* reference counter */
655 struct ip_vs_stats stats; /* statistics */
656
657 /* connection counters and thresholds */
658 atomic_t activeconns; /* active connections */
659 atomic_t inactconns; /* inactive connections */
660 atomic_t persistconns; /* persistent connections */
661 __u32 u_threshold; /* upper threshold */
662 __u32 l_threshold; /* lower threshold */
663
664 /* for destination cache */
665 spinlock_t dst_lock; /* lock of dst_cache */
666 struct dst_entry *dst_cache; /* destination cache entry */
667 u32 dst_rtos; /* RT_TOS(tos) for dst */
668 u32 dst_cookie;
669 union nf_inet_addr dst_saddr;
670
671 /* for virtual service */
672 struct ip_vs_service *svc; /* service it belongs to */
673 __u16 protocol; /* which protocol (TCP/UDP) */
674 __be16 vport; /* virtual port number */
675 union nf_inet_addr vaddr; /* virtual IP address */
676 __u32 vfwmark; /* firewall mark of service */
677 };
678
679
680 /*
681 * The scheduler object
682 */
683 struct ip_vs_scheduler {
684 struct list_head n_list; /* d-linked list head */
685 char *name; /* scheduler name */
686 atomic_t refcnt; /* reference counter */
687 struct module *module; /* THIS_MODULE/NULL */
688
689 /* scheduler initializing service */
690 int (*init_service)(struct ip_vs_service *svc);
691 /* scheduling service finish */
692 int (*done_service)(struct ip_vs_service *svc);
693 /* scheduler updating service */
694 int (*update_service)(struct ip_vs_service *svc);
695
696 /* selecting a server from the given service */
697 struct ip_vs_dest* (*schedule)(struct ip_vs_service *svc,
698 const struct sk_buff *skb);
699 };
700
701 /* The persistence engine object */
702 struct ip_vs_pe {
703 struct list_head n_list; /* d-linked list head */
704 char *name; /* pe name */
705 atomic_t refcnt; /* reference counter */
706 struct module *module; /* THIS_MODULE/NULL */
707
708 /* get the connection template, if any */
709 int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb);
710 bool (*ct_match)(const struct ip_vs_conn_param *p,
711 struct ip_vs_conn *ct);
712 u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval,
713 bool inverse);
714 int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
715 };
716
717 /*
718 * The application module object (a.k.a. app incarnation)
719 */
720 struct ip_vs_app {
721 struct list_head a_list; /* member in app list */
722 int type; /* IP_VS_APP_TYPE_xxx */
723 char *name; /* application module name */
724 __u16 protocol;
725 struct module *module; /* THIS_MODULE/NULL */
726 struct list_head incs_list; /* list of incarnations */
727
728 /* members for application incarnations */
729 struct list_head p_list; /* member in proto app list */
730 struct ip_vs_app *app; /* its real application */
731 __be16 port; /* port number in net order */
732 atomic_t usecnt; /* usage counter */
733
734 /*
735 * output hook: Process packet in inout direction, diff set for TCP.
736 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
737 * 2=Mangled but checksum was not updated
738 */
739 int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
740 struct sk_buff *, int *diff);
741
742 /*
743 * input hook: Process packet in outin direction, diff set for TCP.
744 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
745 * 2=Mangled but checksum was not updated
746 */
747 int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *,
748 struct sk_buff *, int *diff);
749
750 /* ip_vs_app initializer */
751 int (*init_conn)(struct ip_vs_app *, struct ip_vs_conn *);
752
753 /* ip_vs_app finish */
754 int (*done_conn)(struct ip_vs_app *, struct ip_vs_conn *);
755
756
757 /* not used now */
758 int (*bind_conn)(struct ip_vs_app *, struct ip_vs_conn *,
759 struct ip_vs_protocol *);
760
761 void (*unbind_conn)(struct ip_vs_app *, struct ip_vs_conn *);
762
763 int * timeout_table;
764 int * timeouts;
765 int timeouts_size;
766
767 int (*conn_schedule)(struct sk_buff *skb, struct ip_vs_app *app,
768 int *verdict, struct ip_vs_conn **cpp);
769
770 struct ip_vs_conn *
771 (*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app,
772 const struct iphdr *iph, unsigned int proto_off,
773 int inverse);
774
775 struct ip_vs_conn *
776 (*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app,
777 const struct iphdr *iph, unsigned int proto_off,
778 int inverse);
779
780 int (*state_transition)(struct ip_vs_conn *cp, int direction,
781 const struct sk_buff *skb,
782 struct ip_vs_app *app);
783
784 void (*timeout_change)(struct ip_vs_app *app, int flags);
785 };
786
787 struct ipvs_master_sync_state {
788 struct list_head sync_queue;
789 struct ip_vs_sync_buff *sync_buff;
790 int sync_queue_len;
791 unsigned int sync_queue_delay;
792 struct task_struct *master_thread;
793 struct delayed_work master_wakeup_work;
794 struct netns_ipvs *ipvs;
795 };
796
797 /* IPVS in network namespace */
798 struct netns_ipvs {
799 int gen; /* Generation */
800 int enable; /* enable like nf_hooks do */
801 /*
802 * Hash table: for real service lookups
803 */
804 #define IP_VS_RTAB_BITS 4
805 #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
806 #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
807
808 struct list_head rs_table[IP_VS_RTAB_SIZE];
809 /* ip_vs_app */
810 struct list_head app_list;
811 /* ip_vs_ftp */
812 struct ip_vs_app *ftp_app;
813 /* ip_vs_proto */
814 #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */
815 struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
816 /* ip_vs_proto_tcp */
817 #ifdef CONFIG_IP_VS_PROTO_TCP
818 #define TCP_APP_TAB_BITS 4
819 #define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS)
820 #define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1)
821 struct list_head tcp_apps[TCP_APP_TAB_SIZE];
822 spinlock_t tcp_app_lock;
823 #endif
824 /* ip_vs_proto_udp */
825 #ifdef CONFIG_IP_VS_PROTO_UDP
826 #define UDP_APP_TAB_BITS 4
827 #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
828 #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
829 struct list_head udp_apps[UDP_APP_TAB_SIZE];
830 spinlock_t udp_app_lock;
831 #endif
832 /* ip_vs_proto_sctp */
833 #ifdef CONFIG_IP_VS_PROTO_SCTP
834 #define SCTP_APP_TAB_BITS 4
835 #define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
836 #define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
837 /* Hash table for SCTP application incarnations */
838 struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
839 spinlock_t sctp_app_lock;
840 #endif
841 /* ip_vs_conn */
842 atomic_t conn_count; /* connection counter */
843
844 /* ip_vs_ctl */
845 struct ip_vs_stats tot_stats; /* Statistics & est. */
846
847 int num_services; /* no of virtual services */
848
849 rwlock_t rs_lock; /* real services table */
850 /* Trash for destinations */
851 struct list_head dest_trash;
852 /* Service counters */
853 atomic_t ftpsvc_counter;
854 atomic_t nullsvc_counter;
855
856 #ifdef CONFIG_SYSCTL
857 /* 1/rate drop and drop-entry variables */
858 struct delayed_work defense_work; /* Work handler */
859 int drop_rate;
860 int drop_counter;
861 atomic_t dropentry;
862 /* locks in ctl.c */
863 spinlock_t dropentry_lock; /* drop entry handling */
864 spinlock_t droppacket_lock; /* drop packet handling */
865 spinlock_t securetcp_lock; /* state and timeout tables */
866
867 /* sys-ctl struct */
868 struct ctl_table_header *sysctl_hdr;
869 struct ctl_table *sysctl_tbl;
870 #endif
871
872 /* sysctl variables */
873 int sysctl_amemthresh;
874 int sysctl_am_droprate;
875 int sysctl_drop_entry;
876 int sysctl_drop_packet;
877 int sysctl_secure_tcp;
878 #ifdef CONFIG_IP_VS_NFCT
879 int sysctl_conntrack;
880 #endif
881 int sysctl_snat_reroute;
882 int sysctl_sync_ver;
883 int sysctl_sync_ports;
884 int sysctl_sync_qlen_max;
885 int sysctl_sync_sock_size;
886 int sysctl_cache_bypass;
887 int sysctl_expire_nodest_conn;
888 int sysctl_expire_quiescent_template;
889 int sysctl_sync_threshold[2];
890 unsigned int sysctl_sync_refresh_period;
891 int sysctl_sync_retries;
892 int sysctl_nat_icmp_send;
893
894 /* ip_vs_lblc */
895 int sysctl_lblc_expiration;
896 struct ctl_table_header *lblc_ctl_header;
897 struct ctl_table *lblc_ctl_table;
898 /* ip_vs_lblcr */
899 int sysctl_lblcr_expiration;
900 struct ctl_table_header *lblcr_ctl_header;
901 struct ctl_table *lblcr_ctl_table;
902 /* ip_vs_est */
903 struct list_head est_list; /* estimator list */
904 spinlock_t est_lock;
905 struct timer_list est_timer; /* Estimation timer */
906 /* ip_vs_sync */
907 spinlock_t sync_lock;
908 struct ipvs_master_sync_state *ms;
909 spinlock_t sync_buff_lock;
910 struct task_struct **backup_threads;
911 int threads_mask;
912 int send_mesg_maxlen;
913 int recv_mesg_maxlen;
914 volatile int sync_state;
915 volatile int master_syncid;
916 volatile int backup_syncid;
917 struct mutex sync_mutex;
918 /* multicast interface name */
919 char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
920 char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
921 /* net name space ptr */
922 struct net *net; /* Needed by timer routines */
923 };
924
925 #define DEFAULT_SYNC_THRESHOLD 3
926 #define DEFAULT_SYNC_PERIOD 50
927 #define DEFAULT_SYNC_VER 1
928 #define DEFAULT_SYNC_REFRESH_PERIOD (0U * HZ)
929 #define DEFAULT_SYNC_RETRIES 0
930 #define IPVS_SYNC_WAKEUP_RATE 8
931 #define IPVS_SYNC_QLEN_MAX (IPVS_SYNC_WAKEUP_RATE * 4)
932 #define IPVS_SYNC_SEND_DELAY (HZ / 50)
933 #define IPVS_SYNC_CHECK_PERIOD HZ
934 #define IPVS_SYNC_FLUSH_TIME (HZ * 2)
935 #define IPVS_SYNC_PORTS_MAX (1 << 6)
936
937 #ifdef CONFIG_SYSCTL
938
939 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
940 {
941 return ipvs->sysctl_sync_threshold[0];
942 }
943
944 static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
945 {
946 return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]);
947 }
948
949 static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
950 {
951 return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period);
952 }
953
954 static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
955 {
956 return ipvs->sysctl_sync_retries;
957 }
958
959 static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
960 {
961 return ipvs->sysctl_sync_ver;
962 }
963
964 static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
965 {
966 return ACCESS_ONCE(ipvs->sysctl_sync_ports);
967 }
968
969 static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
970 {
971 return ipvs->sysctl_sync_qlen_max;
972 }
973
974 static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
975 {
976 return ipvs->sysctl_sync_sock_size;
977 }
978
979 #else
980
981 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
982 {
983 return DEFAULT_SYNC_THRESHOLD;
984 }
985
986 static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
987 {
988 return DEFAULT_SYNC_PERIOD;
989 }
990
991 static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
992 {
993 return DEFAULT_SYNC_REFRESH_PERIOD;
994 }
995
996 static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
997 {
998 return DEFAULT_SYNC_RETRIES & 3;
999 }
1000
1001 static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
1002 {
1003 return DEFAULT_SYNC_VER;
1004 }
1005
1006 static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
1007 {
1008 return 1;
1009 }
1010
1011 static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
1012 {
1013 return IPVS_SYNC_QLEN_MAX;
1014 }
1015
1016 static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
1017 {
1018 return 0;
1019 }
1020
1021 #endif
1022
1023 /*
1024 * IPVS core functions
1025 * (from ip_vs_core.c)
1026 */
1027 extern const char *ip_vs_proto_name(unsigned int proto);
1028 extern void ip_vs_init_hash_table(struct list_head *table, int rows);
1029 #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
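/*
 * Illustrative sketch: IP_VS_INIT_HASH_TABLE() is intended for fixed-size
 * arrays of list heads, such as the per-netns application hash tables:
 *
 *	struct list_head tcp_apps[TCP_APP_TAB_SIZE];
 *
 *	IP_VS_INIT_HASH_TABLE(tcp_apps);
 */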
1030
1031 #define IP_VS_APP_TYPE_FTP 1
1032
1033 /*
1034 * ip_vs_conn handling functions
1035 * (from ip_vs_conn.c)
1036 */
1037
1038 enum {
1039 IP_VS_DIR_INPUT = 0,
1040 IP_VS_DIR_OUTPUT,
1041 IP_VS_DIR_INPUT_ONLY,
1042 IP_VS_DIR_LAST,
1043 };
1044
1045 static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol,
1046 const union nf_inet_addr *caddr,
1047 __be16 cport,
1048 const union nf_inet_addr *vaddr,
1049 __be16 vport,
1050 struct ip_vs_conn_param *p)
1051 {
1052 p->net = net;
1053 p->af = af;
1054 p->protocol = protocol;
1055 p->caddr = caddr;
1056 p->cport = cport;
1057 p->vaddr = vaddr;
1058 p->vport = vport;
1059 p->pe = NULL;
1060 p->pe_data = NULL;
1061 }
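/*
 * Illustrative sketch (hypothetical caller): build a lookup key with
 * ip_vs_conn_fill_param() and use it to search the connection table:
 *
 *	struct ip_vs_conn_param param;
 *	struct ip_vs_conn *cp;
 *
 *	ip_vs_conn_fill_param(net, af, protocol, &caddr, cport,
 *			      &vaddr, vport, &param);
 *	cp = ip_vs_conn_in_get(&param);
 *	if (cp)
 *		ip_vs_conn_put(cp);	// drop the reference taken by the lookup
 */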
1062
1063 struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
1064 struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
1065
1066 struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
1067 const struct ip_vs_iphdr *iph,
1068 unsigned int proto_off,
1069 int inverse);
1070
1071 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
1072
1073 struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
1074 const struct ip_vs_iphdr *iph,
1075 unsigned int proto_off,
1076 int inverse);
1077
1078 /* put back the conn without restarting its timer */
1079 static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
1080 {
1081 atomic_dec(&cp->refcnt);
1082 }
1083 extern void ip_vs_conn_put(struct ip_vs_conn *cp);
1084 extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
1085
1086 struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
1087 const union nf_inet_addr *daddr,
1088 __be16 dport, unsigned int flags,
1089 struct ip_vs_dest *dest, __u32 fwmark);
1090 extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
1091
1092 extern const char * ip_vs_state_name(__u16 proto, int state);
1093
1094 extern void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
1095 extern int ip_vs_check_template(struct ip_vs_conn *ct);
1096 extern void ip_vs_random_dropentry(struct net *net);
1097 extern int ip_vs_conn_init(void);
1098 extern void ip_vs_conn_cleanup(void);
1099
1100 static inline void ip_vs_control_del(struct ip_vs_conn *cp)
1101 {
1102 struct ip_vs_conn *ctl_cp = cp->control;
1103 if (!ctl_cp) {
1104 IP_VS_ERR_BUF("request control DEL for uncontrolled: "
1105 "%s:%d to %s:%d\n",
1106 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1107 ntohs(cp->cport),
1108 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
1109 ntohs(cp->vport));
1110
1111 return;
1112 }
1113
1114 IP_VS_DBG_BUF(7, "DELeting control for: "
1115 "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
1116 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1117 ntohs(cp->cport),
1118 IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
1119 ntohs(ctl_cp->cport));
1120
1121 cp->control = NULL;
1122 if (atomic_read(&ctl_cp->n_control) == 0) {
1123 IP_VS_ERR_BUF("BUG control DEL with n=0 : "
1124 "%s:%d to %s:%d\n",
1125 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1126 ntohs(cp->cport),
1127 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
1128 ntohs(cp->vport));
1129
1130 return;
1131 }
1132 atomic_dec(&ctl_cp->n_control);
1133 }
1134
1135 static inline void
1136 ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
1137 {
1138 if (cp->control) {
1139 IP_VS_ERR_BUF("request control ADD for already controlled: "
1140 "%s:%d to %s:%d\n",
1141 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1142 ntohs(cp->cport),
1143 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
1144 ntohs(cp->vport));
1145
1146 ip_vs_control_del(cp);
1147 }
1148
1149 IP_VS_DBG_BUF(7, "ADDing control for: "
1150 "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
1151 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1152 ntohs(cp->cport),
1153 IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
1154 ntohs(ctl_cp->cport));
1155
1156 cp->control = ctl_cp;
1157 atomic_inc(&ctl_cp->n_control);
1158 }
1159
1160 /*
1161 * IPVS netns init & cleanup functions
1162 */
1163 extern int ip_vs_estimator_net_init(struct net *net);
1164 extern int ip_vs_control_net_init(struct net *net);
1165 extern int ip_vs_protocol_net_init(struct net *net);
1166 extern int ip_vs_app_net_init(struct net *net);
1167 extern int ip_vs_conn_net_init(struct net *net);
1168 extern int ip_vs_sync_net_init(struct net *net);
1169 extern void ip_vs_conn_net_cleanup(struct net *net);
1170 extern void ip_vs_app_net_cleanup(struct net *net);
1171 extern void ip_vs_protocol_net_cleanup(struct net *net);
1172 extern void ip_vs_control_net_cleanup(struct net *net);
1173 extern void ip_vs_estimator_net_cleanup(struct net *net);
1174 extern void ip_vs_sync_net_cleanup(struct net *net);
1175 extern void ip_vs_service_net_cleanup(struct net *net);
1176
1177 /*
1178 * IPVS application functions
1179 * (from ip_vs_app.c)
1180 */
1181 #define IP_VS_APP_MAX_PORTS 8
1182 extern int register_ip_vs_app(struct net *net, struct ip_vs_app *app);
1183 extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
1184 extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1185 extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
1186 extern int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app,
1187 __u16 proto, __u16 port);
1188 extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
1189 extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
1190
1191 extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
1192 extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
1193
1194 void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe);
1195 void ip_vs_unbind_pe(struct ip_vs_service *svc);
1196 int register_ip_vs_pe(struct ip_vs_pe *pe);
1197 int unregister_ip_vs_pe(struct ip_vs_pe *pe);
1198 struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
1199 struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
1200
1201 /*
1202 * Use a #define to avoid all of module.h just for these trivial ops
1203 */
1204 #define ip_vs_pe_get(pe) \
1205 if (pe && pe->module) \
1206 __module_get(pe->module);
1207
1208 #define ip_vs_pe_put(pe) \
1209 if (pe && pe->module) \
1210 module_put(pe->module);
1211
1212 /*
1213 * IPVS protocol functions (from ip_vs_proto.c)
1214 */
1215 extern int ip_vs_protocol_init(void);
1216 extern void ip_vs_protocol_cleanup(void);
1217 extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
1218 extern int *ip_vs_create_timeout_table(int *table, int size);
1219 extern int
1220 ip_vs_set_state_timeout(int *table, int num, const char *const *names,
1221 const char *name, int to);
1222 extern void
1223 ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
1224 const struct sk_buff *skb,
1225 int offset, const char *msg);
1226
1227 extern struct ip_vs_protocol ip_vs_protocol_tcp;
1228 extern struct ip_vs_protocol ip_vs_protocol_udp;
1229 extern struct ip_vs_protocol ip_vs_protocol_icmp;
1230 extern struct ip_vs_protocol ip_vs_protocol_esp;
1231 extern struct ip_vs_protocol ip_vs_protocol_ah;
1232 extern struct ip_vs_protocol ip_vs_protocol_sctp;
1233
1234 /*
1235 * Registering/unregistering scheduler functions
1236 * (from ip_vs_sched.c)
1237 */
1238 extern int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
1239 extern int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
1240 extern int ip_vs_bind_scheduler(struct ip_vs_service *svc,
1241 struct ip_vs_scheduler *scheduler);
1242 extern int ip_vs_unbind_scheduler(struct ip_vs_service *svc);
1243 extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
1244 extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
1245 extern struct ip_vs_conn *
1246 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
1247 struct ip_vs_proto_data *pd, int *ignored);
1248 extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
1249 struct ip_vs_proto_data *pd);
1250
1251 extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
1252
1253
1254 /*
1255 * IPVS control data and functions (from ip_vs_ctl.c)
1256 */
1257 extern struct ip_vs_stats ip_vs_stats;
1258 extern int sysctl_ip_vs_sync_ver;
1259
1260 extern struct ip_vs_service *
1261 ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
1262 const union nf_inet_addr *vaddr, __be16 vport);
1263
1264 static inline void ip_vs_service_put(struct ip_vs_service *svc)
1265 {
1266 atomic_dec(&svc->usecnt);
1267 }
1268
1269 extern struct ip_vs_dest *
1270 ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
1271 const union nf_inet_addr *daddr, __be16 dport);
1272
1273 extern int ip_vs_use_count_inc(void);
1274 extern void ip_vs_use_count_dec(void);
1275 extern int ip_vs_register_nl_ioctl(void);
1276 extern void ip_vs_unregister_nl_ioctl(void);
1277 extern int ip_vs_control_init(void);
1278 extern void ip_vs_control_cleanup(void);
1279 extern struct ip_vs_dest *
1280 ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
1281 __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
1282 __u16 protocol, __u32 fwmark, __u32 flags);
1283 extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
1284
1285
1286 /*
1287 * IPVS sync daemon data and function prototypes
1288 * (from ip_vs_sync.c)
1289 */
1290 extern int start_sync_thread(struct net *net, int state, char *mcast_ifn,
1291 __u8 syncid);
1292 extern int stop_sync_thread(struct net *net, int state);
1293 extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
1294
1295
1296 /*
1297 * IPVS rate estimator prototypes (from ip_vs_est.c)
1298 */
1299 extern void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
1300 extern void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
1301 extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
1302 extern void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
1303 struct ip_vs_stats *stats);
1304
1305 /*
1306 * Various IPVS packet transmitters (from ip_vs_xmit.c)
1307 */
1308 extern int ip_vs_null_xmit
1309 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1310 extern int ip_vs_bypass_xmit
1311 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1312 extern int ip_vs_nat_xmit
1313 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1314 extern int ip_vs_tunnel_xmit
1315 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1316 extern int ip_vs_dr_xmit
1317 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1318 extern int ip_vs_icmp_xmit
1319 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp,
1320 int offset, unsigned int hooknum);
1321 extern void ip_vs_dst_reset(struct ip_vs_dest *dest);
1322
1323 #ifdef CONFIG_IP_VS_IPV6
1324 extern int ip_vs_bypass_xmit_v6
1325 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1326 extern int ip_vs_nat_xmit_v6
1327 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1328 extern int ip_vs_tunnel_xmit_v6
1329 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1330 extern int ip_vs_dr_xmit_v6
1331 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1332 extern int ip_vs_icmp_xmit_v6
1333 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp,
1334 int offset, unsigned int hooknum);
1335 #endif
1336
1337 #ifdef CONFIG_SYSCTL
1338 /*
1339 * This is a simple mechanism to ignore packets when
1340 * we are overloaded. Just set ip_vs_drop_rate to 'n' and
1341 * we start to drop 1/n of the packets.
1342 */
1343
1344 static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
1345 {
1346 if (!ipvs->drop_rate)
1347 return 0;
1348 if (--ipvs->drop_counter > 0)
1349 return 0;
1350 ipvs->drop_counter = ipvs->drop_rate;
1351 return 1;
1352 }
1353 #else
1354 static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
1355 #endif
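/*
 * Illustrative example: with drop_rate == 10, drop_counter counts down on
 * every call, so roughly one call in ten sees ip_vs_todrop() return 1 (drop
 * this packet) before the counter is reloaded; with drop_rate == 0 nothing
 * is ever dropped.
 */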
1356
1357 /*
1358 * ip_vs_fwd_tag returns the forwarding tag of the connection
1359 */
1360 #define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK)
1361
1362 static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
1363 {
1364 char fwd;
1365
1366 switch (IP_VS_FWD_METHOD(cp)) {
1367 case IP_VS_CONN_F_MASQ:
1368 fwd = 'M'; break;
1369 case IP_VS_CONN_F_LOCALNODE:
1370 fwd = 'L'; break;
1371 case IP_VS_CONN_F_TUNNEL:
1372 fwd = 'T'; break;
1373 case IP_VS_CONN_F_DROUTE:
1374 fwd = 'R'; break;
1375 case IP_VS_CONN_F_BYPASS:
1376 fwd = 'B'; break;
1377 default:
1378 fwd = '?'; break;
1379 }
1380 return fwd;
1381 }
1382
1383 extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
1384 struct ip_vs_conn *cp, int dir);
1385
1386 #ifdef CONFIG_IP_VS_IPV6
1387 extern void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
1388 struct ip_vs_conn *cp, int dir);
1389 #endif
1390
1391 extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
1392
1393 static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
1394 {
1395 __be32 diff[2] = { ~old, new };
1396
1397 return csum_partial(diff, sizeof(diff), oldsum);
1398 }
1399
1400 #ifdef CONFIG_IP_VS_IPV6
1401 static inline __wsum ip_vs_check_diff16(const __be32 *old, const __be32 *new,
1402 __wsum oldsum)
1403 {
1404 __be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0],
1405 new[3], new[2], new[1], new[0] };
1406
1407 return csum_partial(diff, sizeof(diff), oldsum);
1408 }
1409 #endif
1410
1411 static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
1412 {
1413 __be16 diff[2] = { ~old, new };
1414
1415 return csum_partial(diff, sizeof(diff), oldsum);
1416 }
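/*
 * Illustrative sketch (modelled on the fast checksum updates in the TCP/UDP
 * protocol handlers): after NAT rewrites an IPv4 address and a port, the
 * transport checksum can be fixed up incrementally instead of recomputed:
 *
 *	tcph->check =
 *		csum_fold(ip_vs_check_diff4(oldip, newip,
 *			  ip_vs_check_diff2(oldport, newport,
 *			  ~csum_unfold(tcph->check))));
 */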
1417
1418 /*
1419 * Forget current conntrack (unconfirmed) and attach notrack entry
1420 */
1421 static inline void ip_vs_notrack(struct sk_buff *skb)
1422 {
1423 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1424 enum ip_conntrack_info ctinfo;
1425 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1426
1427 if (!ct || !nf_ct_is_untracked(ct)) {
1428 nf_conntrack_put(skb->nfct);
1429 skb->nfct = &nf_ct_untracked_get()->ct_general;
1430 skb->nfctinfo = IP_CT_NEW;
1431 nf_conntrack_get(skb->nfct);
1432 }
1433 #endif
1434 }
1435
1436 #ifdef CONFIG_IP_VS_NFCT
1437 /*
1438 * Netfilter connection tracking
1439 * (from ip_vs_nfct.c)
1440 */
1441 static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
1442 {
1443 #ifdef CONFIG_SYSCTL
1444 return ipvs->sysctl_conntrack;
1445 #else
1446 return 0;
1447 #endif
1448 }
1449
1450 extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
1451 int outin);
1452 extern int ip_vs_confirm_conntrack(struct sk_buff *skb);
1453 extern void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
1454 struct ip_vs_conn *cp, u_int8_t proto,
1455 const __be16 port, int from_rs);
1456 extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
1457
1458 #else
1459
1460 static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
1461 {
1462 return 0;
1463 }
1464
1465 static inline void ip_vs_update_conntrack(struct sk_buff *skb,
1466 struct ip_vs_conn *cp, int outin)
1467 {
1468 }
1469
1470 static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
1471 {
1472 return NF_ACCEPT;
1473 }
1474
1475 static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
1476 {
1477 }
1478 /* CONFIG_IP_VS_NFCT */
1479 #endif
1480
1481 static inline unsigned int
1482 ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
1483 {
1484 /*
1485 * We think the overhead of processing active connections is 256
1486 * times higher than that of inactive connections on average. (This
1487 * factor of 256 might not be accurate; we may change it later.) For
1488 * now we use the following formula to estimate the overhead:
1489 * dest->activeconns*256 + dest->inactconns
1490 */
1491 return (atomic_read(&dest->activeconns) << 8) +
1492 atomic_read(&dest->inactconns);
1493 }
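/*
 * Worked example: a destination with 3 active and 40 inactive connections
 * gets an overhead of 3*256 + 40 = 808, i.e. schedulers comparing
 * ip_vs_dest_conn_overhead() values treat one active connection as roughly
 * as expensive as 256 inactive ones.
 */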
1494
1495 #endif /* _NET_IP_VS_H */