lib/checksum.c
/*
 *
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system. INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              IP/TCP/UDP checksumming routines
 *
 * Authors:     Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Tom May, <ftom@netcom.com>
 *              Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
 *              Lots of code moved from tcp.c and ip.c; see those files
 *              for more names.
 *
 * 03/02/96     Jes Sorensen, Andreas Schwab, Roman Hodek:
 *              Fixed some nasty bugs, causing some horrible crashes.
 *              A: At some points, the sum (%0) was used as
 *              length-counter instead of the length counter
 *              (%1). Thanks to Roman Hodek for pointing this out.
 *              B: GCC seems to mess up if one uses too many
 *              data-registers to hold input values and one tries to
 *              specify d0 and d1 as scratch registers. Letting gcc
 *              choose these registers itself solves the problem.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
   kills, so most of the assembly has to go. */

#include <linux/module.h>
#include <net/checksum.h>

#include <asm/byteorder.h>

#ifndef do_csum
static inline unsigned short from32to16(unsigned int x)
{
        /* add up 16-bit and 16-bit for 16+c bit */
        x = (x & 0xffff) + (x >> 16);
        /* add up carry.. */
        x = (x & 0xffff) + (x >> 16);
        return x;
}
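
/*
 * Worked example: two folds are needed because the first one can itself
 * carry out of bit 16.  For x = 0xffffffff:
 *
 *      0xffff + 0xffff = 0x1fffe   (first fold leaves a carry in bit 16)
 *      0xfffe + 0x0001 = 0xffff    (second fold absorbs it)
 */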

static unsigned int do_csum(const unsigned char *buff, int len)
{
        int odd;
        unsigned int result = 0;

        if (len <= 0)
                goto out;
        odd = 1 & (unsigned long) buff;
        if (odd) {
#ifdef __LITTLE_ENDIAN
                result += (*buff << 8);
#else
                result = *buff;
#endif
                len--;
                buff++;
        }
        if (len >= 2) {
                if (2 & (unsigned long) buff) {
                        result += *(unsigned short *) buff;
                        len -= 2;
                        buff += 2;
                }
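                /*
                 * Sum the bulk of the buffer as 32-bit words.  The test
                 * "carry = (w > result)" below detects unsigned overflow:
                 * if the word just added exceeds the wrapped total, the
                 * addition carried, and the carry is re-added end-around
                 * as one's-complement arithmetic requires.
                 */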
                if (len >= 4) {
                        const unsigned char *end = buff + ((unsigned)len & ~3);
                        unsigned int carry = 0;
                        do {
                                unsigned int w = *(unsigned int *) buff;
                                buff += 4;
                                result += carry;
                                result += w;
                                carry = (w > result);
                        } while (buff < end);
                        result += carry;
                        result = (result & 0xffff) + (result >> 16);
                }
                if (len & 2) {
                        result += *(unsigned short *) buff;
                        buff += 2;
                }
        }
        if (len & 1)
#ifdef __LITTLE_ENDIAN
                result += *buff;
#else
                result += (*buff << 8);
#endif
        result = from32to16(result);
        if (odd)
                result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
        return result;
}
#endif

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
        return (__force __sum16)~do_csum(iph, ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
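
/*
 * Typical use (illustrative sketch; "iph" is assumed to point at a received
 * IPv4 header and iph->ihl is its length in 32-bit words): a header that
 * arrived intact checksums to zero, so
 *
 *      if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
 *              the header is corrupt and the packet should be dropped.
 */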

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum wsum)
{
        unsigned int sum = (__force unsigned int)wsum;
        unsigned int result = do_csum(buff, len);

        /* add in old sum, and carry.. */
        result += sum;
        if (sum > result)
                result += 1;
        return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
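
/*
 * Illustrative sketch (names are examples, not taken from this file):
 * checksumming data split across two buffers by feeding the running sum
 * back in, then folding to 16 bits with csum_fold() from the checksum
 * headers:
 *
 *      __wsum csum = csum_partial(part1, len1, 0);
 *      csum = csum_partial(part2, len2, csum);
 *      __sum16 check = csum_fold(csum);
 */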

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
        return (__force __sum16)~do_csum(buff, len);
}
EXPORT_SYMBOL(ip_compute_csum);
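
/*
 * Illustrative sketch (names are examples): an ICMP message is checksummed
 * over its whole length with the checksum field cleared first, and the
 * result is written back:
 *
 *      icmph->checksum = 0;
 *      icmph->checksum = ip_compute_csum(icmph, len);
 */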

/*
 * copy from fs while checksumming, otherwise like csum_partial
 */
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
                            __wsum sum, int *csum_err)
{
        int missing;

        missing = __copy_from_user(dst, src, len);
        if (missing) {
                memset(dst + len - missing, 0, missing);
                *csum_err = -EFAULT;
        } else
                *csum_err = 0;

        return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
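
/*
 * Note that on a partial copy the zero-filled tail is still included in the
 * returned sum, so callers must check *csum_err (set to -EFAULT above)
 * rather than rely on the checksum alone to detect a fault.
 */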

/*
 * copy from ds while checksumming, otherwise like csum_partial
 */
__wsum
csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
{
        memcpy(dst, src, len);
        return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy);

#ifndef csum_tcpudp_nofold
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                          unsigned short len,
                          unsigned short proto,
                          __wsum sum)
{
        unsigned long long s = (__force u32)sum;

        s += (__force u32)saddr;
        s += (__force u32)daddr;
#ifdef __BIG_ENDIAN
        s += proto + len;
#else
        s += (proto + len) << 8;
#endif
        /* fold the 64-bit sum down to 32 bits; fold twice so that a carry
           out of the first fold is not lost */
        s = (s & 0xffffffff) + (s >> 32);
        s = (s & 0xffffffff) + (s >> 32);
        return (__force __wsum)s;
}
EXPORT_SYMBOL(csum_tcpudp_nofold);
#endif
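
/*
 * Illustrative sketch (names are examples): csum_tcpudp_nofold() computes
 * the unfolded pseudo-header sum; csum_tcpudp_magic() in the checksum
 * headers folds and inverts it to produce the final 16-bit value placed in
 * a TCP or UDP checksum field:
 *
 *      __sum16 check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP,
 *                                        csum_partial(segment, len, 0));
 */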