*/
struct nft_regs {
union {
- struct nft_data data[NFT_REG_MAX + 1];
+ u32 data[20];
struct nft_verdict verdict;
};
};
-static inline void nft_data_copy(struct nft_data *dst,
- const struct nft_data *src)
+static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
+ unsigned int len)
{
- BUILD_BUG_ON(__alignof__(*dst) != __alignof__(u64));
- *(u64 *)&dst->data[0] = *(u64 *)&src->data[0];
- *(u64 *)&dst->data[2] = *(u64 *)&src->data[2];
+ memcpy(dst, src, len);
}
static inline void nft_data_debug(const struct nft_data *data)
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
- const struct nft_data *key,
- const struct nft_data *data,
+ const u32 *key, const u32 *data,
u64 timeout, gfp_t gfp);
void nft_set_elem_destroy(const struct nft_set *set, void *elem);
#define NFT_CHAIN_MAXNAMELEN 32
#define NFT_USERDATA_MAXLEN 256
+/**
+ * enum nft_registers - nf_tables registers
+ *
+ * nf_tables used to have five registers: a verdict register and four data
+ * registers of size 16. The data registers have been changed to 16 registers
+ * of size 4. For compatibility reasons, the NFT_REG_[1-4] registers still
+ * map to areas of size 16; the 4 byte registers are addressed using
+ * NFT_REG32_00 - NFT_REG32_15.
+ */
enum nft_registers {
NFT_REG_VERDICT,
NFT_REG_1,
NFT_REG_2,
NFT_REG_3,
NFT_REG_4,
- __NFT_REG_MAX
+ __NFT_REG_MAX,
+
+ NFT_REG32_00 = 8,
+ NFT_REG32_01,
+ NFT_REG32_02,
+ NFT_REG32_03,
+ NFT_REG32_04,
+ NFT_REG32_05,
+ NFT_REG32_06,
+ NFT_REG32_07,
+ NFT_REG32_08,
+ NFT_REG32_09,
+ NFT_REG32_10,
+ NFT_REG32_11,
+ NFT_REG32_12,
+ NFT_REG32_13,
+ NFT_REG32_14,
+ NFT_REG32_15,
};
#define NFT_REG_MAX (__NFT_REG_MAX - 1)
+#define NFT_REG_SIZE 16
+#define NFT_REG32_SIZE 4
+
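
To make the compatibility mapping above concrete, here is a minimal userspace sketch (an illustration, not part of the patch; the constants are repeated locally) showing that the 20 u32 slots of the new register file cover the verdict area plus the four legacy 16 byte data registers, and where each legacy register starts in that array:

#include <assert.h>
#include <stdint.h>

#define NFT_REG_SIZE	16
#define NFT_REG32_SIZE	4

int main(void)
{
	uint32_t data[20];	/* mirrors the data[] member of struct nft_regs */

	/* verdict area plus NFT_REG_1..NFT_REG_4, each NFT_REG_SIZE bytes wide */
	assert(sizeof(data) == 5 * NFT_REG_SIZE);

	/* NFT_REG_1 starts right after the verdict area, at slot 4 ... */
	assert(1 * NFT_REG_SIZE / NFT_REG32_SIZE == 4);
	/* ... and NFT_REG_4 covers slots 16..19, the end of the array */
	assert(4 * NFT_REG_SIZE / NFT_REG32_SIZE + NFT_REG_SIZE / NFT_REG32_SIZE == 20);
	return 0;
}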
/**
* enum nft_verdicts - nf_tables internal verdicts
*
{
const struct nft_meta *priv = nft_expr_priv(expr);
const struct net_device *in = pkt->in, *out = pkt->out;
- u32 *dest = &regs->data[priv->dreg].data[0];
+ u32 *dest = &regs->data[priv->dreg];
const struct net_bridge_port *p;
switch (priv->key) {
memset(&mr, 0, sizeof(mr));
if (priv->sreg_proto_min) {
mr.range[0].min.all =
- *(__be16 *)&regs->data[priv->sreg_proto_min].data[0];
+ *(__be16 *)&regs->data[priv->sreg_proto_min];
mr.range[0].max.all =
- *(__be16 *)&regs->data[priv->sreg_proto_max].data[0];
+ *(__be16 *)&regs->data[priv->sreg_proto_max];
mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
memset(&range, 0, sizeof(range));
if (priv->sreg_proto_min) {
range.min_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_min].data[0];
+ *(__be16 *)&regs->data[priv->sreg_proto_min];
range.max_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_max].data[0];
+ *(__be16 *)&regs->data[priv->sreg_proto_max];
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
- const struct nft_data *key,
- const struct nft_data *data,
+ const u32 *key, const u32 *data,
u64 timeout, gfp_t gfp)
{
struct nft_set_ext *ext;
}
err = -ENOMEM;
- elem.priv = nft_set_elem_init(set, &tmpl, &elem.key, &data,
+ elem.priv = nft_set_elem_init(set, &tmpl, elem.key.data, data.data,
timeout, GFP_KERNEL);
if (elem.priv == NULL)
goto err3;
return 0;
}
+/**
+ * nft_parse_register - parse a register value from a netlink attribute
+ *
+ * @attr: netlink attribute
+ *
+ * Parse and translate a register value from a netlink attribute.
+ * Registers used to be 128 bit wide; these register numbers are
+ * mapped to the corresponding 32 bit register numbers.
+ */
unsigned int nft_parse_register(const struct nlattr *attr)
{
- return ntohl(nla_get_be32(attr));
+ unsigned int reg;
+
+ reg = ntohl(nla_get_be32(attr));
+ switch (reg) {
+ case NFT_REG_VERDICT...NFT_REG_4:
+ return reg * NFT_REG_SIZE / NFT_REG32_SIZE;
+ default:
+ return reg + NFT_REG_SIZE / NFT_REG32_SIZE - NFT_REG32_00;
+ }
}
EXPORT_SYMBOL_GPL(nft_parse_register);
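
As a worked example of this translation, here is a self-contained sketch (not from the patch; the enum values are assumed from the header above, and a plain if replaces the kernel's case range) that mirrors nft_parse_register() and checks where each netlink register number lands in nft_regs.data[]:

#include <assert.h>

#define NFT_REG_SIZE	16
#define NFT_REG32_SIZE	4
#define NFT_REG_VERDICT	0
#define NFT_REG_1	1
#define NFT_REG_4	4
#define NFT_REG32_00	8
#define NFT_REG32_15	23

static unsigned int parse_register(unsigned int reg)
{
	if (reg <= NFT_REG_4)	/* legacy 128 bit register numbers */
		return reg * NFT_REG_SIZE / NFT_REG32_SIZE;
	return reg + NFT_REG_SIZE / NFT_REG32_SIZE - NFT_REG32_00;
}

int main(void)
{
	assert(parse_register(NFT_REG_VERDICT) == 0);
	assert(parse_register(NFT_REG_1) == 4);		/* same slot as NFT_REG32_00 */
	assert(parse_register(NFT_REG32_00) == 4);
	assert(parse_register(NFT_REG_4) == 16);
	assert(parse_register(NFT_REG32_15) == 19);	/* last slot of data[20] */
	return 0;
}

NFT_REG_1 and NFT_REG32_00 resolve to the same slot, which is what keeps existing userspace that still speaks in 128 bit register numbers working unchanged.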
+/**
+ * nft_dump_register - dump a register value to a netlink attribute
+ *
+ * @skb: socket buffer
+ * @attr: attribute number
+ * @reg: register number
+ *
+ * Construct a netlink attribute containing the register number. For
+ * compatibility reasons, register numbers that are a multiple of 4 are
+ * translated to the corresponding 128 bit register numbers.
+ */
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg)
{
+ if (reg % (NFT_REG_SIZE / NFT_REG32_SIZE) == 0)
+ reg = reg / (NFT_REG_SIZE / NFT_REG32_SIZE);
+ else
+ reg = reg - NFT_REG_SIZE / NFT_REG32_SIZE + NFT_REG32_00;
+
return nla_put_be32(skb, attr, htonl(reg));
}
EXPORT_SYMBOL_GPL(nft_dump_register);
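
A companion sketch for the reverse direction (again an illustration, not the patch itself): slot indexes that are a multiple of NFT_REG_SIZE / NFT_REG32_SIZE are dumped as legacy 128 bit register numbers, everything else as an NFT_REG32_* number.

#include <assert.h>

#define NFT_REG_SIZE	16
#define NFT_REG32_SIZE	4
#define NFT_REG_VERDICT	0
#define NFT_REG_1	1
#define NFT_REG_4	4
#define NFT_REG32_00	8
#define NFT_REG32_05	13

static unsigned int dump_register(unsigned int reg)
{
	if (reg % (NFT_REG_SIZE / NFT_REG32_SIZE) == 0)
		return reg / (NFT_REG_SIZE / NFT_REG32_SIZE);
	return reg - NFT_REG_SIZE / NFT_REG32_SIZE + NFT_REG32_00;
}

int main(void)
{
	assert(dump_register(0) == NFT_REG_VERDICT);
	assert(dump_register(4) == NFT_REG_1);	/* multiples of 4 dump as legacy regs */
	assert(dump_register(16) == NFT_REG_4);
	assert(dump_register(9) == NFT_REG32_05);
	return 0;
}

One consequence of this choice is that a value loaded via NFT_REG32_00 is dumped back as NFT_REG_1; both names address the same memory, so the dump remains accurate.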
*/
int nft_validate_register_load(enum nft_registers reg, unsigned int len)
{
- if (reg <= NFT_REG_VERDICT)
+ if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
return -EINVAL;
- if (reg > NFT_REG_MAX)
- return -ERANGE;
if (len == 0)
return -EINVAL;
- if (len > FIELD_SIZEOF(struct nft_data, data))
+ if (reg * NFT_REG32_SIZE + len > FIELD_SIZEOF(struct nft_regs, data))
return -ERANGE;
+
return 0;
}
EXPORT_SYMBOL_GPL(nft_validate_register_load);
return 0;
default:
- if (reg < NFT_REG_1)
+ if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
return -EINVAL;
- if (reg > NFT_REG_MAX)
- return -ERANGE;
if (len == 0)
return -EINVAL;
- if (len > FIELD_SIZEOF(struct nft_data, data))
+ if (reg * NFT_REG32_SIZE + len >
+ FIELD_SIZEOF(struct nft_regs, data))
return -ERANGE;
if (data != NULL && type != NFT_DATA_VALUE)
const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
u32 mask = nft_cmp_fast_mask(priv->len);
- if ((regs->data[priv->sreg].data[0] & mask) == priv->data)
+ if ((regs->data[priv->sreg] & mask) == priv->data)
return;
regs->verdict.code = NFT_BREAK;
}
{
const struct nft_payload *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
- u32 *dest = &regs->data[priv->dreg].data[0];
+ u32 *dest = &regs->data[priv->dreg];
unsigned char *ptr;
if (priv->base == NFT_PAYLOAD_NETWORK_HEADER)
if (unlikely(ptr + priv->len >= skb_tail_pointer(skb)))
return false;
+ *dest = 0;
if (priv->len == 2)
*(u16 *)dest = *(u16 *)ptr;
else if (priv->len == 4)
const struct nft_pktinfo *pkt)
{
const struct nft_bitwise *priv = nft_expr_priv(expr);
- const u32 *src = &regs->data[priv->sreg].data[0];
- u32 *dst = &regs->data[priv->dreg].data[0];
+ const u32 *src = &regs->data[priv->sreg];
+ u32 *dst = &regs->data[priv->dreg];
unsigned int i;
for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++)
const struct nft_pktinfo *pkt)
{
const struct nft_byteorder *priv = nft_expr_priv(expr);
- u32 *src = &regs->data[priv->sreg].data[0];
- u32 *dst = &regs->data[priv->dreg].data[0];
+ u32 *src = &regs->data[priv->sreg];
+ u32 *dst = &regs->data[priv->dreg];
union { u32 u32; u16 u16; } *s, *d;
unsigned int i;
const struct nft_pktinfo *pkt)
{
const struct nft_ct *priv = nft_expr_priv(expr);
- u32 *dest = &regs->data[priv->dreg].data[0];
+ u32 *dest = &regs->data[priv->dreg];
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
const struct nf_conn_help *help;
const struct nft_ct *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
#ifdef CONFIG_NF_CONNTRACK_MARK
- u32 value = regs->data[priv->sreg].data[0];
+ u32 value = regs->data[priv->sreg];
#endif
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
const struct nft_pktinfo *pkt)
{
struct nft_exthdr *priv = nft_expr_priv(expr);
- u32 *dest = &regs->data[priv->dreg].data[0];
+ u32 *dest = &regs->data[priv->dreg];
unsigned int offset = 0;
int err;
goto err;
offset += priv->offset;
+ dest[priv->len / NFT_REG32_SIZE] = 0;
if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
goto err;
return;
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
- nft_data_copy(&regs->data[priv->dreg], &priv->data);
+ nft_data_copy(&regs->data[priv->dreg], &priv->data, priv->dlen);
}
static const struct nla_policy nft_immediate_policy[NFTA_IMMEDIATE_MAX + 1] = {
if (set->ops->lookup(set, &regs->data[priv->sreg], &ext)) {
if (set->flags & NFT_SET_MAP)
nft_data_copy(&regs->data[priv->dreg],
- nft_set_ext_data(ext));
+ nft_set_ext_data(ext), set->dlen);
return;
}
regs->verdict.code = NFT_BREAK;
const struct nft_meta *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
const struct net_device *in = pkt->in, *out = pkt->out;
- u32 *dest = &regs->data[priv->dreg].data[0];
+ u32 *dest = &regs->data[priv->dreg];
switch (priv->key) {
case NFT_META_LEN:
*dest = skb->len;
break;
case NFT_META_PROTOCOL:
+ *dest = 0;
*(__be16 *)dest = skb->protocol;
break;
case NFT_META_NFPROTO:
case NFT_META_IIFTYPE:
if (in == NULL)
goto err;
+ *dest = 0;
*(u16 *)dest = in->type;
break;
case NFT_META_OIFTYPE:
if (out == NULL)
goto err;
+ *dest = 0;
*(u16 *)dest = out->type;
break;
case NFT_META_SKUID:
{
const struct nft_meta *meta = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
- u32 value = regs->data[meta->sreg].data[0];
+ u32 value = regs->data[meta->sreg];
switch (meta->key) {
case NFT_META_MARK:
if (priv->sreg_addr_min) {
if (priv->family == AF_INET) {
range.min_addr.ip = (__force __be32)
- regs->data[priv->sreg_addr_min].data[0];
+ regs->data[priv->sreg_addr_min];
range.max_addr.ip = (__force __be32)
- regs->data[priv->sreg_addr_max].data[0];
+ regs->data[priv->sreg_addr_max];
} else {
memcpy(range.min_addr.ip6,
- &regs->data[priv->sreg_addr_min].data,
- sizeof(struct nft_data));
+ &regs->data[priv->sreg_addr_min],
+ sizeof(range.min_addr.ip6));
memcpy(range.max_addr.ip6,
- &regs->data[priv->sreg_addr_max].data,
- sizeof(struct nft_data));
+ &regs->data[priv->sreg_addr_max],
+ sizeof(range.max_addr.ip6));
}
range.flags |= NF_NAT_RANGE_MAP_IPS;
}
if (priv->sreg_proto_min) {
range.min_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_min].data[0];
+ *(__be16 *)&regs->data[priv->sreg_proto_min];
range.max_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_max].data[0];
+ *(__be16 *)&regs->data[priv->sreg_proto_max];
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
{
const struct nft_payload *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
- u32 *dest = &regs->data[priv->dreg].data[0];
+ u32 *dest = &regs->data[priv->dreg];
int offset;
switch (priv->base) {
}
offset += priv->offset;
+ dest[priv->len / NFT_REG32_SIZE] = 0;
if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
goto err;
return;