/*
 *  linux/include/asm-arm/checksum.h
 *
 * IP checksum routines
 *
 * Copyright (C) Original authors of ../asm-i386/checksum.h
 * Copyright (C) 1996-1999 Russell King
 */
#ifndef __ASM_ARM_CHECKSUM_H
#define __ASM_ARM_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);

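/*
 * Illustrative sketch (not part of the original header): because csum_partial
 * can be fed its own result, a message split across two fragments can be
 * checksummed incrementally.  The helper name and its two-fragment layout are
 * hypothetical; only non-terminal fragments must have even lengths.
 */
static inline unsigned int
example_csum_two_fragments(const unsigned char *frag1, int len1,
			   const unsigned char *frag2, int len2)
{
	unsigned int sum;

	sum = csum_partial(frag1, len1, 0);	/* len1 assumed even */
	sum = csum_partial(frag2, len2, sum);	/* the last fragment may be odd */
	return sum;				/* still an unfolded 32-bit partial sum */
}
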
/*
 * the same as csum_partial, but copies from src to dst while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here it is even more important to align src and dst on a 32-bit (or,
 * even better, a 64-bit) boundary
 */

unsigned int
csum_partial_copy_nocheck(const char *src, char *dst, int len, int sum);

unsigned int
csum_partial_copy_from_user(const char __user *src, char *dst, int len, int sum, int *err_ptr);

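/*
 * Illustrative sketch (not part of the original header): the _from_user
 * variant reports faults through *err_ptr rather than through the return
 * value, so callers typically clear an error slot first and test it after
 * the call.  The helper name is hypothetical.
 */
static inline unsigned int
example_copy_and_csum_from_user(const char __user *src, char *dst,
				int len, int *err)
{
	unsigned int sum;

	*err = 0;
	sum = csum_partial_copy_from_user(src, dst, len, 0, err);
	if (*err)
		return 0;	/* caller must treat the copy (and the sum) as invalid */
	return sum;
}
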
/*
 * This is the old (and unsafe) way of doing checksums; a warning message will
 * be printed if it is used and an exception occurs.
 *
 * these functions should go away after some time.
 */
#define csum_partial_copy(src,dst,len,sum)	csum_partial_copy_nocheck(src,dst,len,sum)

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always checksummed on 4-octet boundaries.
 */
static inline unsigned short
ip_fast_csum(unsigned char * iph, unsigned int ihl)
{
	unsigned int sum, tmp1;

	__asm__ __volatile__(
	"ldr	%0, [%1], #4		@ ip_fast_csum		\n\
	ldr	%3, [%1], #4					\n\
	sub	%2, %2, #5					\n\
	adds	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
	adcs	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
1:	adcs	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
	tst	%2, #15			@ do this carefully	\n\
	subne	%2, %2, #1		@ without destroying	\n\
	bne	1b			@ the carry flag	\n\
	adcs	%0, %0, %3					\n\
	adc	%0, %0, #0					\n\
	adds	%0, %0, %0, lsl #16				\n\
	addcs	%0, %0, #0x10000				\n\
	mvn	%0, %0						\n\
	mov	%0, %0, lsr #16"
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
	: "1" (iph), "2" (ihl)
	: "cc");
	return sum;
}

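/*
 * Illustrative sketch (not part of the original header): a received IPv4
 * header checksums to zero when it is intact, because the header already
 * carries the complement of its own sum.  The helper name is hypothetical;
 * iph points at the header and ihl is its length in 32-bit words (>= 5).
 */
static inline int
example_ip_header_ok(unsigned char *iph, unsigned int ihl)
{
	/* a non-zero result means the header (or its checksum field) is corrupt */
	return ip_fast_csum(iph, ihl) == 0;
}
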
/*
 * Fold a partial checksum without adding pseudo headers
 */
static inline unsigned int
csum_fold(unsigned int sum)
{
	__asm__(
	"adds	%0, %1, %1, lsl #16	@ csum_fold		\n\
	addcs	%0, %0, #0x10000"
	: "=r" (sum)
	: "r" (sum)
	: "cc");
	return (~sum) >> 16;
}

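/*
 * Illustrative sketch (not part of the original header): csum_fold() turns a
 * running 32-bit sum into the final, already-complemented 16-bit checksum.
 * The 0 -> 0xffff remapping below is a UDP rule from RFC 768 (a transmitted
 * checksum of 0 means "no checksum"); it is not something this header
 * enforces, and the helper name is hypothetical.
 */
static inline unsigned short
example_udp_fold(unsigned int sum)
{
	unsigned short check = csum_fold(sum);

	return check ? check : 0xffff;	/* never transmit 0 as a UDP checksum */
}
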
static inline unsigned int
csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
		   unsigned int proto, unsigned int sum)
{
	__asm__(
	"adds	%0, %1, %2	@ csum_tcpudp_nofold	\n\
	adcs	%0, %0, %3					\n\
	adcs	%0, %0, %4					\n\
	adcs	%0, %0, %5					\n\
	adc	%0, %0, #0"
	: "=&r"(sum)
	: "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len)), "Ir" (ntohs(proto))
	: "cc");
	return sum;
}
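
/*
 * Illustrative sketch (not part of the original header): csum_tcpudp_nofold()
 * only accumulates the pseudo-header into the running 32-bit sum, leaving the
 * fold and complement to the caller; folding its result gives the same
 * complemented 16-bit value that csum_tcpudp_magic() below computes in one
 * step.  The helper name is hypothetical.
 */
static inline unsigned short
example_tcpudp_csum(unsigned long saddr, unsigned long daddr,
		    unsigned short len, unsigned int proto, unsigned int sum)
{
	/* add the pseudo-header, then fold and complement the 32-bit sum */
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
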
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline unsigned short int
csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
		  unsigned int proto, unsigned int sum)
{
	__asm__(
	"adds	%0, %1, %2	@ csum_tcpudp_magic	\n\
	adcs	%0, %0, %3					\n\
	adcs	%0, %0, %4					\n\
	adcs	%0, %0, %5					\n\
	adc	%0, %0, #0					\n\
	adds	%0, %0, %0, lsl #16				\n\
	addcs	%0, %0, #0x10000				\n\
	mvn	%0, %0"
	: "=&r"(sum)
	: "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len)), "Ir" (ntohs(proto))
	: "cc");
	return sum >> 16;
}

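/*
 * Illustrative sketch (not part of the original header): a complete TCP or
 * UDP checksum combines csum_partial() over the transport header plus payload
 * with the pseudo-header added by csum_tcpudp_magic().  The helper name is
 * hypothetical; proto would typically be IPPROTO_TCP or IPPROTO_UDP, and len
 * is the transport length in bytes.
 */
static inline unsigned short
example_transport_csum(unsigned long saddr, unsigned long daddr,
		       const unsigned char *transport, unsigned short len,
		       unsigned int proto)
{
	/* checksum the transport header and payload first... */
	unsigned int sum = csum_partial(transport, len, 0);

	/* ...then mix in the pseudo-header, fold and complement */
	return csum_tcpudp_magic(saddr, daddr, len, proto, sum);
}
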
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline unsigned short
ip_compute_csum(unsigned char * buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

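/*
 * Illustrative sketch (not part of the original header): ICMP has no
 * pseudo-header, so on receive an intact message, checksum field included,
 * folds (complemented) to zero under ip_compute_csum().  The helper name is
 * hypothetical.
 */
static inline int
example_icmp_csum_ok(unsigned char *buff, int len)
{
	return ip_compute_csum(buff, len) == 0;
}
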
#define _HAVE_ARCH_IPV6_CSUM
extern unsigned long
__csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len,
		__u32 proto, unsigned int sum);

static inline unsigned short int
csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len,
		unsigned short proto, unsigned int sum)
{
	return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
					   htonl(proto), sum));
}
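
/*
 * Illustrative sketch (not part of the original header): the IPv6 variant
 * plays the same role as csum_tcpudp_magic(), taking the 128-bit source and
 * destination addresses for the pseudo-header.  The helper name is
 * hypothetical; len and proto are passed in host byte order, as the htonl()
 * calls above expect.
 */
static inline unsigned short
example_transport_csum_v6(struct in6_addr *saddr, struct in6_addr *daddr,
			  const unsigned char *transport, __u32 len,
			  unsigned short proto)
{
	/* partial sum over the transport header and payload... */
	unsigned int sum = csum_partial(transport, len, 0);

	/* ...plus the IPv6 pseudo-header, folded and complemented */
	return csum_ipv6_magic(saddr, daddr, len, proto, sum);
}
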
#endif