break;
case BPF_S_ANC_RXHASH:
ctx->seen |= SEEN_SKB;
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
- off = offsetof(struct sk_buff, rxhash);
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+ off = offsetof(struct sk_buff, hash);
emit(ARM_LDR_I(r_A, r_skb, off), ctx);
break;
case BPF_S_ANC_VLAN_TAG:
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
mark));
break;
case BPF_S_ANC_RXHASH:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
- rxhash));
+ hash));
break;
case BPF_S_ANC_VLAN_TAG:
case BPF_S_ANC_VLAN_TAG_PRESENT:
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
/* icm %r5,3,<d(type)>(%r1) */
EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
break;
- case BPF_S_ANC_RXHASH: /* A = skb->rxhash */
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
- /* l %r5,<d(rxhash)>(%r2) */
- EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash));
+ case BPF_S_ANC_RXHASH: /* A = skb->hash */
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+ /* l %r5,<d(hash)>(%r2) */
+ EMIT4_DISP(0x58502000, offsetof(struct sk_buff, hash));
break;
case BPF_S_ANC_VLAN_TAG:
case BPF_S_ANC_VLAN_TAG_PRESENT:
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
emit_load16(r_A, struct net_device, type, r_A);
break;
case BPF_S_ANC_RXHASH:
- emit_skb_load32(rxhash, r_A);
+ emit_skb_load32(hash, r_A);
break;
case BPF_S_ANC_VLAN_TAG:
case BPF_S_ANC_VLAN_TAG_PRESENT:
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
}
break;
case BPF_S_ANC_RXHASH:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
- if (is_imm8(offsetof(struct sk_buff, rxhash))) {
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+ if (is_imm8(offsetof(struct sk_buff, hash))) {
/* mov off8(%rdi),%eax */
- EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash));
+ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash));
} else {
EMIT2(0x8b, 0x87);
- EMIT(offsetof(struct sk_buff, rxhash), 4);
+ EMIT(offsetof(struct sk_buff, hash), 4);
}
break;
case BPF_S_ANC_QUEUE:
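
The is_imm8() test in the x86 hunk above picks between two ModRM encodings: displacements that fit in a signed byte get the short disp8 form, anything larger needs a 4-byte disp32. For reference, the helper is essentially the following sketch:

/* is_imm8() as referenced above: a displacement that fits in a signed
 * byte can use the short ModRM disp8 form (the EMIT3 branch); anything
 * larger falls back to the 4-byte disp32 form (the EMIT2 branch). */
static inline bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}
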
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
* @skb_iif: ifindex of device we arrived on
* @tc_index: Traffic control index
* @tc_verd: traffic control verdict
- * @rxhash: the packet hash computed on receive
+ * @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
- * @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
+ * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
* ports.
* @wifi_acked_valid: wifi_acked was set
* @wifi_acked: whether frame was acked on wifi or not
int skb_iif;
- __u32 rxhash;
+ __u32 hash;
__be16 vlan_proto;
__u16 vlan_tci;
#endif
__u8 pfmemalloc:1;
__u8 ooo_okay:1;
- __u8 l4_rxhash:1;
+ __u8 l4_hash:1;
__u8 wifi_acked_valid:1;
__u8 wifi_acked:1;
__u8 no_fcs:1;
static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
- skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
- skb->rxhash = hash;
+ skb->l4_hash = (type == PKT_HASH_TYPE_L4);
+ skb->hash = hash;
}
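
skb_set_hash() is the setter drivers are expected to use when forwarding a hardware-computed hash; the pkt_hash_types argument records whether the hash covered transport ports. A hypothetical receive-path fragment (example_rx_hash and its parameters are invented for illustration):

/* Hypothetical NIC receive path: hand the hardware RSS hash to the
 * stack. PKT_HASH_TYPE_L4 marks it as a canonical 4-tuple hash, so
 * skb_get_hash() below will trust it and skip software dissection. */
static void example_rx_hash(struct sk_buff *skb, u32 hw_hash, bool l4)
{
	skb_set_hash(skb, hw_hash,
		     l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
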
void __skb_get_hash(struct sk_buff *skb);
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
- if (!skb->l4_rxhash)
+ if (!skb->l4_hash)
__skb_get_hash(skb);
- return skb->rxhash;
+ return skb->hash;
}
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
- return skb->rxhash;
+ return skb->hash;
}
static inline void skb_clear_hash(struct sk_buff *skb)
{
- skb->rxhash = 0;
- skb->l4_rxhash = 0;
+ skb->hash = 0;
+ skb->l4_hash = 0;
}
static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
- if (!skb->l4_rxhash)
+ if (!skb->l4_hash)
skb_clear_hash(skb);
}
static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
- to->rxhash = from->rxhash;
- to->l4_rxhash = from->l4_rxhash;
+ to->hash = from->hash;
+ to->l4_hash = from->l4_hash;
};
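
Taken together, these helpers keep the hash value and its l4 qualifier in sync. A small illustrative sketch (example_hash_invariants is invented; the comments state the expected post-conditions):

/* Invented sketch of the invariants maintained by the helpers above. */
static void example_hash_invariants(struct sk_buff *a, struct sk_buff *b)
{
	skb_set_hash(a, 0xdeadbeef, PKT_HASH_TYPE_L4);
	skb_copy_hash(b, a);		/* b->hash == a->hash, b->l4_hash == 1 */
	skb_clear_hash_if_not_l4(b);	/* no-op: the hash is an L4 hash */
	skb_clear_hash(a);		/* a->hash == 0, a->l4_hash == 0 */
}
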
#ifdef NET_SKBUFF_DATA_USES_OFFSET
diff --git a/include/net/sock.h b/include/net/sock.h
const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
- if (unlikely(sk->sk_rxhash != skb->rxhash)) {
+ if (unlikely(sk->sk_rxhash != skb->hash)) {
sock_rps_reset_flow(sk);
- sk->sk_rxhash = skb->rxhash;
+ sk->sk_rxhash = skb->hash;
}
#endif
}
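
The saved sk_rxhash keys the RPS sock flow table: on recvmsg() the entry for the flow's hash is stamped with the consuming CPU, which get_rps_cpu() below reads back as next_cpu. A rough sketch of the recording side, modeled on rps_record_sock_flow() (sketch, not the verbatim helper):

/* Rough sketch (cf. rps_record_sock_flow() in linux/netdevice.h): the
 * entry indexed by the low bits of the flow hash is stamped with the
 * CPU running recvmsg(), so later packets of the same flow can be
 * steered after it. */
static inline void example_record_flow(struct rps_sock_flow_table *table,
				       u32 hash, unsigned int cpu)
{
	if (table && hash)
		table->ents[hash & table->mask] = cpu;
}
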
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
__field( u16, vlan_tci )
__field( u16, protocol )
__field( u8, ip_summed )
- __field( u32, rxhash )
- __field( bool, l4_rxhash )
+ __field( u32, hash )
+ __field( bool, l4_hash )
__field( unsigned int, len )
__field( unsigned int, data_len )
__field( unsigned int, truesize )
__entry->vlan_tci = vlan_tx_tag_get(skb);
__entry->protocol = ntohs(skb->protocol);
__entry->ip_summed = skb->ip_summed;
- __entry->rxhash = skb->rxhash;
- __entry->l4_rxhash = skb->l4_rxhash;
+ __entry->hash = skb->hash;
+ __entry->l4_hash = skb->l4_hash;
__entry->len = skb->len;
__entry->data_len = skb->data_len;
__entry->truesize = skb->truesize;
__entry->gso_type = skb_shinfo(skb)->gso_type;
),
- TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d rxhash=0x%08x l4_rxhash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
+ TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
__get_str(name), __entry->napi_id, __entry->queue_mapping,
__entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto,
__entry->vlan_tci, __entry->protocol, __entry->ip_summed,
- __entry->rxhash, __entry->l4_rxhash, __entry->len,
+ __entry->hash, __entry->l4_hash, __entry->len,
__entry->data_len, __entry->truesize,
__entry->mac_header_valid, __entry->mac_header,
__entry->nr_frags, __entry->gso_size, __entry->gso_type)
diff --git a/net/core/dev.c b/net/core/dev.c
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (!flow_table)
goto out;
- flow_id = skb->rxhash & flow_table->mask;
+ flow_id = skb_get_hash(skb) & flow_table->mask;
rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
rxq_index, flow_id);
if (rc < 0)
struct rps_sock_flow_table *sock_flow_table;
int cpu = -1;
u16 tcpu;
+ u32 hash;
if (skb_rx_queue_recorded(skb)) {
u16 index = skb_get_rx_queue(skb);
}
skb_reset_network_header(skb);
- if (!skb_get_hash(skb))
+ hash = skb_get_hash(skb);
+ if (!hash)
goto done;
flow_table = rcu_dereference(rxqueue->rps_flow_table);
u16 next_cpu;
struct rps_dev_flow *rflow;
- rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
+ rflow = &flow_table->flows[hash & flow_table->mask];
tcpu = rflow->cpu;
- next_cpu = sock_flow_table->ents[skb->rxhash &
- sock_flow_table->mask];
+ next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];
/*
* If the desired CPU (where last recvmsg was done) is
}
if (map) {
- tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
+ tcpu = map->cpus[((u64) hash * map->len) >> 32];
if (cpu_online(tcpu)) {
cpu = tcpu;
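
Two indexing schemes appear in get_rps_cpu() above: the flow tables are power-of-two sized and indexed by masking the hash, while the per-queue CPU map has arbitrary length and is indexed with a multiplicative range mapping. A sketch of both (example_* names invented):

/* The flow table is power-of-two sized, so the low bits of the hash
 * index it directly: */
static u32 example_flow_index(u32 hash, u32 mask)	/* mask == size - 1 */
{
	return hash & mask;
}

/* The CPU map can have any length; scaling the full 32-bit hash keeps
 * the distribution uniform without a divide, e.g. hash 0x80000000 with
 * len 8 selects slot (0x80000000ULL * 8) >> 32 == 4: */
static u32 example_map_index(u32 hash, u32 len)
{
	return ((u64)hash * len) >> 32;
}
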
diff --git a/net/core/filter.c b/net/core/filter.c
A = skb->dev->type;
continue;
case BPF_S_ANC_RXHASH:
- A = skb->rxhash;
+ A = skb->hash;
continue;
case BPF_S_ANC_CPU:
A = raw_smp_processor_id();
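
The interpreter case above (and each JIT hunk earlier) services the ancillary load that userspace reaches through the negative-offset escape. A minimal classic BPF filter that drops packets without a valid flow hash might look like this sketch; note the userspace constant keeps the old RXHASH name after the rename:

#include <linux/filter.h>

/* Classic BPF sketch: A = skb->hash via the SKF_AD_RXHASH ancillary
 * offset; accept the packet if the hash is non-zero, drop otherwise. */
static struct sock_filter example_hash_filter[] = {
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_RXHASH),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 1),	/* hash == 0? */
	BPF_STMT(BPF_RET | BPF_K, 0),			/* yes: drop */
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),		/* no: accept */
};
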
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
/*
* __skb_get_hash: calculate a flow hash based on src/dst addresses
- * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
- * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
+ * and src/dst port numbers. Sets hash in skb to non-zero hash value
+ * on success, zero indicates no valid hash. Also, sets l4_hash in skb
* if hash is a canonical 4-tuple hash over transport ports.
*/
void __skb_get_hash(struct sk_buff *skb)
return;
if (keys.ports)
- skb->l4_rxhash = 1;
+ skb->l4_hash = 1;
/* get a consistent hash (same value on both flow directions) */
if (((__force u32)keys.dst < (__force u32)keys.src) ||
if (!hash)
hash = 1;
- skb->rxhash = hash;
+ skb->hash = hash;
}
EXPORT_SYMBOL(__skb_get_hash);
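
The "consistent hash" step above orders the flow tuple before hashing so both directions of a connection produce the same value and are steered together. A sketch of the idea (example_make_symmetric is invented; the real code also breaks ties on the port pair):

/* Invented illustration of the canonicalization above: order the
 * addresses numerically before hashing so A->B and B->A hash
 * identically. */
static void example_make_symmetric(u32 *saddr, u32 *daddr)
{
	if (*daddr < *saddr)
		swap(*saddr, *daddr);	/* linux/kernel.h swap() */
}
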
hash = skb->sk->sk_hash;
else
hash = (__force u16) skb->protocol ^
- skb->rxhash;
+ skb->hash;
hash = __flow_hash_1word(hash);
queue_index = map->queues[
((u64)hash * map->len) >> 32];
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
struct sk_buff *skb,
unsigned int num)
{
- return reciprocal_scale(skb->rxhash, num);
+ return reciprocal_scale(skb_get_hash(skb), num);
}
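
reciprocal_scale() packages the same multiplicative range mapping used by the RPS and XPS sites earlier, and taking skb_get_hash() as input computes the hash on demand, which is why the standalone skb_get_hash() call removed just below becomes redundant. From userspace this path is reached by joining a PACKET_FANOUT_HASH group; a minimal sketch (error handling omitted; example_join_fanout is invented):

#include <sys/socket.h>
#include <linux/if_packet.h>

/* Join a fanout group that spreads packets across member sockets by
 * flow hash; fanout_demux_hash() above picks the member. The argument
 * packs the group id in the low 16 bits and the mode in the high 16. */
static int example_join_fanout(int fd, unsigned short group_id)
{
	int arg = group_id | (PACKET_FANOUT_HASH << 16);

	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
}
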
static unsigned int fanout_demux_lb(struct packet_fanout *f,
if (!skb)
return 0;
}
- skb_get_hash(skb);
idx = fanout_demux_hash(f, skb, num);
break;
case PACKET_FANOUT_LB: