net/ipv4/tcp_westwood.c
/*
 * TCP Westwood+
 *
 * Angelo Dell'Aera: TCP Westwood+ support
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
        u32    bw_ns_est;       /* first bandwidth estimation..not too smoothed 8) */
        u32    bw_est;          /* bandwidth estimate */
        u32    rtt_win_sx;      /* here starts a new evaluation... */
        u32    bk;
        u32    snd_una;         /* used for evaluating the number of acked bytes */
        u32    cumul_ack;
        u32    accounted;
        u32    rtt;
        u32    rtt_min;         /* minimum observed RTT */
};
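
/*
 * Units used throughout this file: bw_ns_est and bw_est are in bytes
 * per jiffy; bk, snd_una, cumul_ack and accounted are byte counts;
 * rtt, rtt_min and rtt_win_sx are in jiffies.
 */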

/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN   (HZ/20)  /* 50ms */
#define TCP_WESTWOOD_INIT_RTT  (20*HZ)  /* maybe too conservative?! */

/*
 * @tcp_westwood_init
 * This function initializes fields used in TCP Westwood+. It is called
 * after the initial SYN, so the sequence numbers are correct, but for
 * new passive connections we have no information about RTTmin at this
 * time, so we simply set it to TCP_WESTWOOD_INIT_RTT. This value was
 * deliberately chosen to be overly conservative, since that way we can
 * be sure it will be updated in a consistent way as soon as possible,
 * which will reasonably happen within the first RTT period of the
 * connection lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);

        w->bk = 0;
        w->bw_ns_est = 0;
        w->bw_est = 0;
        w->accounted = 0;
        w->cumul_ack = 0;
        w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
        w->rtt_win_sx = tcp_time_stamp;
        w->snd_una = tcp_sk(sk)->snd_una;
}

/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
        return (((7 * a) + b) >> 3);
}
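
/*
 * Note: westwood_do_filter() is an exponentially weighted moving
 * average with gain 1/8: new = (7*a + b) / 8. Starting from an estimate
 * of 0 and feeding a constant sample s, the output reaches about 55% of
 * s after six samples (1 - (7/8)^6), favoring smoothing over
 * responsiveness.
 */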

static inline void westwood_filter(struct westwood *w, u32 delta)
{
        w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
        w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
}
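
/*
 * westwood_filter() smooths in two stages: the raw sample bk/delta
 * (bytes acked per jiffy over the just-closed window) feeds bw_ns_est,
 * which in turn feeds the final, smoother bw_est.
 */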

/*
 * @westwood_pkts_acked
 * Called after processing a group of packets, but all Westwood needs is
 * the last sample of srtt.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt)
{
        struct westwood *w = inet_csk_ca(sk);

        if (cnt > 0)
                w->rtt = tcp_sk(sk)->srtt >> 3;
}
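
/*
 * Note: tp->srtt is kept scaled by 8 by the kernel's RTT estimator, so
 * the >> 3 above converts it back to plain jiffies.
 */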

/*
 * @westwood_update_window
 * It updates the RTT evaluation window if it is the right moment to do
 * so, and in that case runs the filter to evaluate bandwidth.
 */
static void westwood_update_window(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);
        s32 delta = tcp_time_stamp - w->rtt_win_sx;

        /*
         * See if an RTT-window has passed.
         * Be careful: if RTT is less than 50ms we don't filter but keep
         * 'building the sample'. This minimum limit was chosen because
         * estimating over very small time intervals is better avoided.
         * Obviously on a LAN we will reasonably always have
         * right_bound = left_bound + TCP_WESTWOOD_RTT_MIN.
         */
        if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
                westwood_filter(w, delta);

                w->bk = 0;
                w->rtt_win_sx = tcp_time_stamp;
        }
}
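
/*
 * For example, with HZ=100 and a smoothed RTT of 200ms (20 jiffies),
 * the filter runs at most once every 20 jiffies; acks arriving inside
 * the window only accumulate bytes into bk.
 */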

/*
 * @westwood_fast_bw
 * It is called when we are in the fast path. In particular it is called
 * when header prediction is successful. In that case the update is
 * straightforward and doesn't need any particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        westwood_update_window(sk);

        w->bk += tp->snd_una - w->snd_una;
        w->snd_una = tp->snd_una;
        w->rtt_min = min(w->rtt, w->rtt_min);
}
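
/*
 * On the fast path every ack is in order, so the newly acked bytes are
 * simply the advance of snd_una; no dupack or partial-ack accounting is
 * needed here.
 */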

/*
 * @westwood_acked_count
 * This function computes cumul_ack, used to update bk in the case of
 * delayed or partial acks.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        w->cumul_ack = tp->snd_una - w->snd_una;

        /* If cumul_ack is 0 this is a dupack since it's not moving
         * tp->snd_una.
         */
        if (!w->cumul_ack) {
                w->accounted += tp->mss_cache;
                w->cumul_ack = tp->mss_cache;
        }

        if (w->cumul_ack > tp->mss_cache) {
                /* Partial or delayed ack */
                if (w->accounted >= w->cumul_ack) {
                        w->accounted -= w->cumul_ack;
                        w->cumul_ack = tp->mss_cache;
                } else {
                        w->cumul_ack -= w->accounted;
                        w->accounted = 0;
                }
        }

        w->snd_una = tp->snd_una;

        return w->cumul_ack;
}
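
/*
 * Worked example: two dupacks each credit one mss (leaving accounted =
 * 2*mss); a later cumulative ack advancing snd_una by 3*mss then yields
 * cumul_ack = 3*mss - 2*mss = mss, so the total credited to bk stays
 * 3*mss and nothing is counted twice.
 */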

static inline u32 westwood_bw_rttmin(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct westwood *w = inet_csk_ca(sk);

        return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}

/*
 * TCP Westwood
 * Here the limit is evaluated as bandwidth estimate * RTTmin; to obtain
 * it in packets we divide by mss_cache. The result is clamped to a
 * minimum of 2, so this never returns 0.
 */
static u32 tcp_westwood_cwnd_min(struct sock *sk)
{
        return westwood_bw_rttmin(sk);
}

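/*
 * React to congestion control events: on a fast-path ack, update the
 * bandwidth sample; when CWR completes, reset both cwnd and ssthresh to
 * the estimated bandwidth-delay product instead of Reno's halving
 * (during F-RTO only ssthresh is reset); on a slow-path ack, do the
 * full delayed/partial-ack accounting.
 */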
static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        switch (event) {
        case CA_EVENT_FAST_ACK:
                westwood_fast_bw(sk);
                break;

        case CA_EVENT_COMPLETE_CWR:
                tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(sk);
                break;

        case CA_EVENT_FRTO:
                tp->snd_ssthresh = westwood_bw_rttmin(sk);
                break;

        case CA_EVENT_SLOW_ACK:
                westwood_update_window(sk);
                w->bk += westwood_acked_count(sk);
                w->rtt_min = min(w->rtt, w->rtt_min);
                break;

        default:
                /* don't care */
                break;
        }
}


/* Extract info for TCP socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
                              struct sk_buff *skb)
{
        const struct westwood *ca = inet_csk_ca(sk);

        if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
                struct rtattr *rta;
                struct tcpvegas_info *info;

                rta = __RTA_PUT(skb, INET_DIAG_VEGASINFO, sizeof(*info));
                info = RTA_DATA(rta);
                info->tcpv_enabled = 1;
                info->tcpv_rttcnt = 0;
                info->tcpv_rtt = jiffies_to_usecs(ca->rtt);
                info->tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
rtattr_failure: ;
        }
}
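
/*
 * There is no Westwood-specific diag attribute in this kernel, so the
 * Vegas info block is reused to export the current and minimum RTT
 * (converted to microseconds).
 */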


static struct tcp_congestion_ops tcp_westwood = {
        .init           = tcp_westwood_init,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .min_cwnd       = tcp_westwood_cwnd_min,
        .cwnd_event     = tcp_westwood_event,
        .get_info       = tcp_westwood_info,
        .pkts_acked     = tcp_westwood_pkts_acked,

        .owner          = THIS_MODULE,
        .name           = "westwood"
};
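
/*
 * Once registered, the algorithm can be selected system-wide with
 * sysctl net.ipv4.tcp_congestion_control=westwood, or per socket via
 * setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "westwood", 8).
 */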

static int __init tcp_westwood_register(void)
{
        BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");