[NET]: XTENSA checksum annotations and cleanups.
author Al Viro <viro@zeniv.linux.org.uk>
Wed, 15 Nov 2006 05:23:40 +0000 (21:23 -0800)
committer David S. Miller <davem@sunset.davemloft.net>
Sun, 3 Dec 2006 05:23:24 +0000 (21:23 -0800)
* sanitize prototypes, annotate
* kill csum_partial_copy_fromuser
* kill csum_partial_copy
* kill useless shifts
* usual ntohs->shift

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
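
For context on the annotations below: __wsum, __sum16 and __be32 are sparse "bitwise" types from include/linux/types.h. At run time they are plain 32- and 16-bit integers, but to sparse they are distinct, non-mixable types, so converting between them and ordinary integers requires an explicit __force cast (as in the csum_fold() and csum_partial_copy_from_user() hunks). A simplified sketch of how that machinery works, not the kernel's verbatim definitions:

#ifdef __CHECKER__			/* defined while sparse checks the code */
# define __bitwise	__attribute__((bitwise))
# define __force	__attribute__((force))
#else
# define __bitwise
# define __force
#endif

typedef unsigned int   __bitwise __wsum;	/* 32-bit partial checksum */
typedef unsigned short __bitwise __sum16;	/* 16-bit folded checksum */
typedef unsigned int   __bitwise __be32;	/* 32-bit big-endian value */

/* Without __force, sparse warns when a plain integer is mixed with a
 * bitwise type; a deliberate conversion has to be spelled out: */
static inline __sum16 fold_result(unsigned int sum)
{
	return (__force __sum16)sum;
}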
include/asm-xtensa/checksum.h

index 03114f8d1e18e436a4677514158cfab9e89a7564..5435aff9a4b7c807d955540e7958260d8e3e0262 100644
@@ -26,7 +26,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -36,7 +36,7 @@ asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsign
  * better 64-bit) boundary
  */
 
-asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, int len, int sum,
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum,
                                                   int *src_err_ptr, int *dst_err_ptr);
 
 /*
@@ -46,34 +46,25 @@ asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, i
  *     If you use these functions directly please don't forget the access_ok().
  */
 static inline
-unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
-                                       int len, int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+                                       int len, __wsum sum)
 {
-       return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
+       return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
 
 static inline
-unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
-                                               int len, int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+                                               int len, __wsum sum, int *err_ptr)
 {
-       return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, NULL);
+       return csum_partial_copy_generic((__force const void *)src, dst,
+                                       len, sum, err_ptr, NULL);
 }
 
-/*
- * These are the old (and unsafe) way of doing checksums, a warning message will be
- * printed if they are used and an exeption occurs.
- *
- * these functions should go away after some time.
- */
-
-#define csum_partial_copy_fromuser csum_partial_copy
-unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);
-
 /*
  *     Fold a partial checksum
  */
 
-static __inline__ unsigned int csum_fold(unsigned int sum)
+static __inline__ __sum16 csum_fold(__wsum sum)
 {
        unsigned int __dummy;
        __asm__("extui  %1, %0, 16, 16\n\t"
@@ -87,14 +78,14 @@ static __inline__ unsigned int csum_fold(unsigned int sum)
                "extui  %0, %0, 0, 16\n\t"
                : "=r" (sum), "=&r" (__dummy)
                : "0" (sum));
-       return sum;
+       return (__force __sum16)sum;
 }
 
 /*
  *     This is a version of ip_compute_csum() optimized for IP headers,
  *     which always checksum on 4 octet boundaries.
  */
-static __inline__ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
        unsigned int sum, tmp, endaddr;
 
@@ -127,17 +118,16 @@ static __inline__ unsigned short ip_fast_csum(unsigned char * iph, unsigned int
        return  csum_fold(sum);
 }
 
-static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
-                                                  unsigned long daddr,
+static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                                                   unsigned short len,
                                                   unsigned short proto,
-                                                  unsigned int sum)
+                                                  __wsum sum)
 {
 
 #ifdef __XTENSA_EL__
-       unsigned long len_proto = (ntohs(len)<<16)+proto*256;
+       unsigned long len_proto = (len + proto) << 8;
 #elif defined(__XTENSA_EB__)
-       unsigned long len_proto = (proto<<16)+len;
+       unsigned long len_proto = len + proto;
 #else
 # error processor byte order undefined!
 #endif
@@ -162,11 +152,10 @@ static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static __inline__ unsigned short int csum_tcpudp_magic(unsigned long saddr,
-                                                      unsigned long daddr,
+static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
                                                       unsigned short len,
                                                       unsigned short proto,
-                                                      unsigned int sum)
+                                                      __wsum sum)
 {
        return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -176,17 +165,16 @@ static __inline__ unsigned short int csum_tcpudp_magic(unsigned long saddr,
  * in icmp.c
  */
 
-static __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len)
+static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
 {
     return csum_fold (csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-                                                    struct in6_addr *daddr,
-                                                    __u32 len,
-                                                    unsigned short proto,
-                                                    unsigned int sum)
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+                                         const struct in6_addr *daddr,
+                                         __u32 len, unsigned short proto,
+                                         __wsum sum)
 {
        unsigned int __dummy;
        __asm__("l32i   %1, %2, 0\n\t"
@@ -248,8 +236,8 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
  *     Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static __inline__ unsigned int csum_and_copy_to_user (const char *src, char *dst,
-                                   int len, int sum, int *err_ptr)
+static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst,
+                                   int len, __wsum sum, int *err_ptr)
 {
        if (access_ok(VERIFY_WRITE, dst, len))
                return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);
@@ -257,6 +245,6 @@ static __inline__ unsigned int csum_and_copy_to_user (const char *src, char *dst
        if (len)
                *err_ptr = -EFAULT;
 
-       return -1; /* invalid checksum */
+       return (__force __wsum)-1; /* invalid checksum */
 }
 #endif
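
The "kill useless shifts" / "usual ntohs->shift" items refer to the __XTENSA_EL__ branch of csum_tcpudp_nofold(): (ntohs(len) << 16) + proto * 256 and (len + proto) << 8 differ as 32-bit values, but they are congruent modulo 0xffff, so once the accumulator is folded with end-around carry the pseudo-header checksum comes out the same while the byte swap and one shift disappear. A quick user-space check of that equivalence (stand-alone sketch, not part of the patch; the byte swap that ntohs() performs on a little-endian CPU is written out explicitly so the program is host-independent):

#include <stdint.h>
#include <stdio.h>

/* Fold a wide ones'-complement accumulator down to 16 bits with
 * end-around carry and complement it, as csum_fold() does. */
static uint16_t fold(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* arbitrary fixed saddr + daddr + initial sum of a pseudo-header */
	const uint64_t base = (uint64_t)0xc0a80001u + 0xc0a800feu + 0x12345678u;

	for (uint32_t len = 0; len <= 0xffff; len++) {
		for (uint32_t proto = 0; proto <= 0xff; proto++) {
			/* what ntohs(len) evaluates to on little-endian */
			uint32_t swapped = ((len & 0xff) << 8) | (len >> 8);
			uint64_t sum_old = base + ((swapped << 16) + proto * 256);
			uint64_t sum_new = base + ((len + proto) << 8);

			if (fold(sum_old) != fold(sum_new)) {
				printf("mismatch: len=%u proto=%u\n", len, proto);
				return 1;
			}
		}
	}
	puts("old and new len_proto terms fold to the same checksum");
	return 0;
}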