size_t offset, size_t size, bool is_mask)
{
struct sw_flow_key_range *range = NULL;
- size_t start = offset;
- size_t end = offset + size;
+ size_t start = rounddown(offset, sizeof(long));
+ size_t end = roundup(offset + size, sizeof(long));
if (!is_mask)
range = &match->range;
} \
} while (0)
/* Number of key bytes covered by 'range' (end - start). */
+static u16 range_n_bytes(const struct sw_flow_key_range *range)
+{
+	return range->end - range->start;
+}
+
void ovs_match_init(struct sw_flow_match *match,
struct sw_flow_key *key,
struct sw_flow_mask *mask)
/* AND the masked region of 'src' with 'mask->key' into 'dst', one long
 * word at a time.  Assumes 'mask->range' start/end are long-aligned --
 * they are rounded with rounddown()/roundup() where the range is
 * updated (NOTE(review): confirm all range writers keep this invariant).
 */
void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
-	u8 *m = (u8 *)&mask->key + mask->range.start;
-	u8 *s = (u8 *)src + mask->range.start;
-	u8 *d = (u8 *)dst + mask->range.start;
+	const long *m = (long *)((u8 *)&mask->key + mask->range.start);
+	const long *s = (long *)((u8 *)src + mask->range.start);
+	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;
-	memset(dst, 0, sizeof(*dst));
-	for (i = 0; i < ovs_sw_flow_mask_size_roundup(mask); i++) {
-		*d = *s & *m;
-		d++, s++, m++;
-	}
+	/* The memory outside of 'mask->range' is not set since further
+	 * operations on 'dst' only use contents within 'mask->range'.
+	 */
+	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
+		*d++ = *s++ & *m++;
}
#define TCP_FLAGS_OFFSET 13
/* Jenkins-hash the key bytes in [key_start, key_end).  The length is
 * converted to u32 words with a plain shift; the BUILD_BUG_ON below
 * guarantees a long (the mask/compare stride) is a whole number of
 * u32s, so no bytes are dropped.
 */
static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start,
			 int key_end)
{
-	return jhash2((u32 *)((u8 *)key + key_start),
-		      DIV_ROUND_UP(key_end - key_start, sizeof(u32)), 0);
+	u32 *hash_key = (u32 *)((u8 *)key + key_start);
+	int hash_u32s = (key_end - key_start) >> 2;
+
+	/* Make sure the number of hash bytes is a multiple of u32. */
+	BUILD_BUG_ON(sizeof(long) % sizeof(u32));
+
+	return jhash2(hash_key, hash_u32s, 0);
}
/* Byte offset within the flow key at which hashing/comparison begins:
 * 0 when a tunnel destination is set (tunnel metadata is included),
 * otherwise the offset of 'phy' rounded down to a long boundary so the
 * long-stride loops stay aligned.
 */
static int flow_key_start(const struct sw_flow_key *key)
	if (key->tun_key.ipv4_dst)
		return 0;
	else
-		return offsetof(struct sw_flow_key, phy);
+		return rounddown(offsetof(struct sw_flow_key, phy),
+				 sizeof(long));
}
/* Compare two flow keys over [key_start, key_end) one long word at a
 * time.  Differences are XOR-ed and OR-accumulated into 'diffs' and
 * tested once at the end; returns true when the ranges are identical.
 */
static bool __cmp_key(const struct sw_flow_key *key1,
		const struct sw_flow_key *key2, int key_start, int key_end)
{
-	return !memcmp((u8 *)key1 + key_start,
-		       (u8 *)key2 + key_start, (key_end - key_start));
+	const long *cp1 = (long *)((u8 *)key1 + key_start);
+	const long *cp2 = (long *)((u8 *)key2 + key_start);
+	long diffs = 0;
+	int i;
+
+	for (i = key_start; i < key_end; i += sizeof(long))
+		diffs |= *cp1++ ^ *cp2++;
+
+	return diffs == 0;
}
/* True when 'flow->key' equals 'key' (already masked by the caller)
 * over [key_start, key_end).  Renamed to make the masked-key contract
 * explicit at call sites.
 */
-static bool __flow_cmp_key(const struct sw_flow *flow,
+static bool __flow_cmp_masked_key(const struct sw_flow *flow,
		const struct sw_flow_key *key, int key_start, int key_end)
{
	return __cmp_key(&flow->key, key, key_start, key_end);
}
/* Look up 'unmasked' in 'table' under 'mask': mask the key first, hash
 * the masked bytes, then walk the hash bucket (RCU) for a flow that
 * carries the same mask and whose key matches the masked key.
 */
static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table,
-				    const struct sw_flow_key *flow_key,
+				    const struct sw_flow_key *unmasked,
				    struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	u32 hash;
	struct sw_flow_key masked_key;
-	ovs_flow_key_mask(&masked_key, flow_key, mask);
+	ovs_flow_key_mask(&masked_key, unmasked, mask);
	hash = ovs_flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
		if (flow->mask == mask &&
-		    __flow_cmp_key(flow, &masked_key, key_start, key_end))
+		    __flow_cmp_masked_key(flow, &masked_key,
+					  key_start, key_end))
			return flow;
	}
	return NULL;
* Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	/* The long-stride mask/compare/hash loops assume the key size is
	 * a whole number of longs; fail the build otherwise.
	 */
+	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
+
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
					0, NULL);
	if (flow_cache == NULL)
return (a->range.end == b->range.end)
&& (a->range.start == b->range.start)
- && (memcmp(a_, b_, ovs_sw_flow_mask_actual_size(a)) == 0);
+ && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}
struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
u8 *m = (u8 *)&mask->key + range->start;
mask->range = *range;
- memset(m, val, ovs_sw_flow_mask_size_roundup(mask));
+ memset(m, val, range_n_bytes(range));
}