From 1a203cb33a7dc791b6c0aedf701e70ac00c50cdb Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 10 Jul 2012 19:05:57 +0000
Subject: [PATCH] ipv6: optimize ipv6 addresses compares

On 64-bit arches having efficient unaligned accesses (e.g. x86_64) we can
use long words to reduce the number of instructions for free.

Joe Perches suggested changing ipv6_masked_addr_cmp() to return a bool
instead of 'int', to make sure ipv6_masked_addr_cmp() cannot be used
in a sorting function.

Signed-off-by: Eric Dumazet
Cc: Joe Perches
Signed-off-by: David S. Miller
---
 include/net/ipv6.h | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index aecf88436ab..d4261d4d6c4 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -298,14 +298,23 @@ static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr
 	return memcmp(a1, a2, sizeof(struct in6_addr));
 }
 
-static inline int
+static inline bool
 ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
 		     const struct in6_addr *a2)
 {
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+	const unsigned long *ul1 = (const unsigned long *)a1;
+	const unsigned long *ulm = (const unsigned long *)m;
+	const unsigned long *ul2 = (const unsigned long *)a2;
+
+	return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
+		  ((ul1[1] ^ ul2[1]) & ulm[1]));
+#else
 	return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) |
 		  ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) |
 		  ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) |
 		  ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
+#endif
 }
 
 static inline void ipv6_addr_prefix(struct in6_addr *pfx,
@@ -335,10 +344,17 @@ static inline void ipv6_addr_set(struct in6_addr *addr,
 static inline bool ipv6_addr_equal(const struct in6_addr *a1,
 				   const struct in6_addr *a2)
 {
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+	const unsigned long *ul1 = (const unsigned long *)a1;
+	const unsigned long *ul2 = (const unsigned long *)a2;
+
+	return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
+#else
 	return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
 		(a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
 		(a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
 		(a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
+#endif
 }
 
 static inline bool __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2,
@@ -391,8 +407,14 @@ bool ip6_frag_match(struct inet_frag_queue *q, void *a);
 
 static inline bool ipv6_addr_any(const struct in6_addr *a)
 {
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+	const unsigned long *ul = (const unsigned long *)a;
+
+	return (ul[0] | ul[1]) == 0UL;
+#else
 	return (a->s6_addr32[0] | a->s6_addr32[1] |
 		a->s6_addr32[2] | a->s6_addr32[3]) == 0;
+#endif
 }
 
 static inline bool ipv6_addr_loopback(const struct in6_addr *a)
-- 
2.20.1
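
As a quick sanity check of the equivalence the patch relies on, the standalone
userspace sketch below compares the original four-word s6_addr32 form of
ipv6_addr_equal() with the two-long-word form added above. It is not part of
the kernel patch: the demo_in6_addr type, the equal32()/equal64() helper names,
and the sample addresses are illustrative assumptions, and it assumes a 64-bit
unsigned long with cheap unaligned loads, mirroring the patch's
BITS_PER_LONG == 64 and CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS guard.

/*
 * Standalone sketch (not part of the patch). Mirrors the in6_addr layout
 * with a local demo type and checks that the 2x64-bit compare gives the
 * same answer as the original 4x32-bit compare. Assumes unsigned long is
 * 64 bits, as the BITS_PER_LONG == 64 guard in the patch does.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_in6_addr {
	union {
		uint8_t  s6_addr[16];
		uint32_t s6_addr32[4];
	};
};

/* Original form: four 32-bit words XORed and ORed together. */
static bool equal32(const struct demo_in6_addr *a1,
		    const struct demo_in6_addr *a2)
{
	return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
		(a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
		(a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
		(a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
}

/* Optimized form: the same 128 bits viewed as two long words. */
static bool equal64(const struct demo_in6_addr *a1,
		    const struct demo_in6_addr *a2)
{
	const unsigned long *ul1 = (const unsigned long *)a1;
	const unsigned long *ul2 = (const unsigned long *)a2;

	return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
}

int main(void)
{
	/* 2001:db8::1, then a copy, then a copy whose last byte differs. */
	struct demo_in6_addr a = {
		.s6_addr = { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x01 }
	};
	struct demo_in6_addr b = a;
	struct demo_in6_addr c = a;

	c.s6_addr[15] = 0x02;

	printf("a == b: 32-bit %d, 64-bit %d\n", equal32(&a, &b), equal64(&a, &b));
	printf("a == c: 32-bit %d, 64-bit %d\n", equal32(&a, &c), equal64(&a, &c));
	return 0;
}

Both forms agree: the identical pair reports 1/1 and the pair differing in the
last byte reports 0/0, which is the property the two-word fast path depends on.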