struct fib_rule __rcu *ctarget;
char iifname[IFNAMSIZ];
char oifname[IFNAMSIZ];
- kuid_t uid_start;
- kuid_t uid_end;
struct rcu_head rcu;
struct net *fr_net;
};
[FRA_FWMARK] = { .type = NLA_U32 }, \
[FRA_FWMASK] = { .type = NLA_U32 }, \
[FRA_TABLE] = { .type = NLA_U32 }, \
- [FRA_GOTO] = { .type = NLA_U32 }, \
- [FRA_UID_START] = { .type = NLA_U32 }, \
- [FRA_UID_END] = { .type = NLA_U32 }
+ [FRA_GOTO] = { .type = NLA_U32 }
static inline void fib_rule_get(struct fib_rule *rule)
{
#include <linux/socket.h>
#include <linux/in6.h>
#include <linux/atomic.h>
-#include <linux/uidgid.h>
/*
* ifindex generation is per-net namespace, and loopback is
#define FLOWI_FLAG_CAN_SLEEP 0x02
#define FLOWI_FLAG_KNOWN_NH 0x04
__u32 flowic_secid;
- kuid_t flowic_uid;
};
union flowi_uli {
#define flowi4_proto __fl_common.flowic_proto
#define flowi4_flags __fl_common.flowic_flags
#define flowi4_secid __fl_common.flowic_secid
-#define flowi4_uid __fl_common.flowic_uid
/* (saddr,daddr) must be grouped, same order as in IP header */
__be32 saddr;
__u32 mark, __u8 tos, __u8 scope,
__u8 proto, __u8 flags,
__be32 daddr, __be32 saddr,
- __be16 dport, __be16 sport,
- kuid_t uid)
+ __be16 dport, __be16 sport)
{
fl4->flowi4_oif = oif;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
fl4->flowi4_proto = proto;
fl4->flowi4_flags = flags;
fl4->flowi4_secid = 0;
- fl4->flowi4_uid = uid;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->fl4_dport = dport;
#define flowi6_proto __fl_common.flowic_proto
#define flowi6_flags __fl_common.flowic_flags
#define flowi6_secid __fl_common.flowic_secid
-#define flowi6_uid __fl_common.flowic_uid
struct in6_addr daddr;
struct in6_addr saddr;
__be32 flowlabel;
#define flowi_proto u.__fl_common.flowic_proto
#define flowi_flags u.__fl_common.flowic_flags
#define flowi_secid u.__fl_common.flowic_secid
-#define flowi_uid u.__fl_common.flowic_uid
} __attribute__((__aligned__(BITS_PER_LONG/8)));
static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
/* -1 if not needed */
int bound_dev_if;
u8 tos;
- kuid_t uid;
};
#define IP_REPLY_ARG_NOSRCCHECK 1
const struct in6_addr *gwaddr);
extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
- int oif, u32 mark, kuid_t uid);
+ int oif, u32 mark);
extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
__be32 mtu);
extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
RT_SCOPE_UNIVERSE, proto,
sk ? inet_sk_flowi_flags(sk) : 0,
- daddr, saddr, dport, sport, sock_i_uid(sk));
+ daddr, saddr, dport, sport);
if (sk)
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
return ip_route_output_flow(net, fl4, sk);
flow_flags |= FLOWI_FLAG_CAN_SLEEP;
flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
- protocol, flow_flags, dst, src, dport, sport,
- sock_i_uid(sk));
+ protocol, flow_flags, dst, src, dport, sport);
}
static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
FRA_TABLE, /* Extended table id */
FRA_FWMASK, /* mask for netfilter mark */
FRA_OIFNAME,
- FRA_UID_START, /* UID range */
- FRA_UID_END,
__FRA_MAX
};
RTA_TABLE,
RTA_MARK,
RTA_MFC_STATS,
- RTA_UID,
__RTA_MAX
};
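Likewise, with RTA_UID removed here (and from the rtm_ipv4_policy and inet_rtm_getroute hunks further down), an RTM_GETROUTE request simply carries no UID and the kernel resolves the route without one. A self-contained userspace sketch of such a lookup, not part of the patch and using 192.0.2.1 purely as a documentation-range example destination:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
		char attrs[64];			/* room for RTA_DST */
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct rtattr *rta;
	struct in_addr dst;
	char resp[4096];
	ssize_t n;
	int fd;

	inet_pton(AF_INET, "192.0.2.1", &dst);	/* arbitrary example destination */

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.rtm.rtm_family = AF_INET;

	/* Attach RTA_DST only; there is no RTA_UID to send any more. */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = RTA_DST;
	rta->rta_len = RTA_LENGTH(sizeof(dst));
	memcpy(RTA_DATA(rta), &dst, sizeof(dst));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0 || sendto(fd, &req, req.nlh.nlmsg_len, 0,
			     (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		return 1;

	n = recv(fd, resp, sizeof(resp), 0);	/* RTM_NEWROUTE reply or an error */
	printf("received %zd bytes\n", n);
	close(fd);
	return 0;
}

The reply comes back as a single RTM_NEWROUTE message (or a netlink error); parsing it is omitted here.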
r->pref = pref;
r->table = table;
r->flags = flags;
- r->uid_start = INVALID_UID;
- r->uid_end = INVALID_UID;
r->fr_net = hold_net(ops->fro_net);
/* The lock is not required here, the list is unreachable
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
-static inline kuid_t fib_nl_uid(struct nlattr *nla)
-{
- return make_kuid(current_user_ns(), nla_get_u32(nla));
-}
-
-static int nla_put_uid(struct sk_buff *skb, int idx, kuid_t uid)
-{
- return nla_put_u32(skb, idx, from_kuid_munged(current_user_ns(), uid));
-}
-
-static int fib_uid_range_match(struct flowi *fl, struct fib_rule *rule)
-{
- return (!uid_valid(rule->uid_start) && !uid_valid(rule->uid_end)) ||
- (uid_gte(fl->flowi_uid, rule->uid_start) &&
- uid_lte(fl->flowi_uid, rule->uid_end));
-}
-
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
struct flowi *fl, int flags)
{
if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
goto out;
- if (!fib_uid_range_match(fl, rule))
- goto out;
-
ret = ops->match(rule, fl, flags);
out:
return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
} else if (rule->action == FR_ACT_GOTO)
goto errout_free;
- /* UID start and end must either both be valid or both unspecified. */
- rule->uid_start = rule->uid_end = INVALID_UID;
- if (tb[FRA_UID_START] || tb[FRA_UID_END]) {
- if (tb[FRA_UID_START] && tb[FRA_UID_END]) {
- rule->uid_start = fib_nl_uid(tb[FRA_UID_START]);
- rule->uid_end = fib_nl_uid(tb[FRA_UID_END]);
- }
- if (!uid_valid(rule->uid_start) ||
- !uid_valid(rule->uid_end) ||
- !uid_lte(rule->uid_start, rule->uid_end))
- goto errout_free;
- }
-
err = ops->configure(rule, skb, frh, tb);
if (err < 0)
goto errout_free;
(rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
continue;
- if (tb[FRA_UID_START] &&
- !uid_eq(rule->uid_start, fib_nl_uid(tb[FRA_UID_START])))
- continue;
-
- if (tb[FRA_UID_END] &&
- !uid_eq(rule->uid_end, fib_nl_uid(tb[FRA_UID_END])))
- continue;
-
if (!ops->compare(rule, frh, tb))
continue;
+ nla_total_size(4) /* FRA_PRIORITY */
+ nla_total_size(4) /* FRA_TABLE */
+ nla_total_size(4) /* FRA_FWMARK */
- + nla_total_size(4) /* FRA_FWMASK */
- + nla_total_size(4) /* FRA_UID_START */
- + nla_total_size(4); /* FRA_UID_END */
+ + nla_total_size(4); /* FRA_FWMASK */
if (ops->nlmsg_payload)
payload += ops->nlmsg_payload(rule);
((rule->mark_mask || rule->mark) &&
nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
(rule->target &&
- nla_put_u32(skb, FRA_GOTO, rule->target)) ||
- (uid_valid(rule->uid_start) &&
- nla_put_uid(skb, FRA_UID_START, rule->uid_start)) ||
- (uid_valid(rule->uid_end) &&
- nla_put_uid(skb, FRA_UID_END, rule->uid_end)))
+ nla_put_u32(skb, FRA_GOTO, rule->target)))
goto nla_put_failure;
if (ops->fill(rule, skb, frh) < 0)
goto nla_put_failure;
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
[RTA_FLOW] = { .type = NLA_U32 },
- [RTA_UID] = { .type = NLA_U32 },
};
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
sk->sk_protocol,
flags,
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
- ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport,
- sock_i_uid(sk));
+ ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
security_req_classify_flow(req, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol, inet_sk_flowi_flags(sk),
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
- ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport,
- sock_i_uid(sk));
+ ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
security_req_classify_flow(req, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
ip_reply_arg_flowi_flags(arg),
daddr, saddr,
- tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
- arg->uid);
+ tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
- inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
- sock_i_uid(sk));
+ inet_sk_flowi_flags(sk), faddr, saddr, 0, 0);
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
(inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
- daddr, saddr, 0, 0,
- sock_i_uid(sk));
+ daddr, saddr, 0, 0);
if (!inet->hdrincl) {
err = raw_probe_proto_opt(&fl4, msg);
flowi4_init_output(fl4, oif, mark, tos,
RT_SCOPE_UNIVERSE, prot,
flow_flags,
- iph->daddr, iph->saddr, 0, 0,
- sock_i_uid(sk));
+ iph->daddr, iph->saddr, 0, 0);
}
static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
- struct sock *sk)
+ const struct sock *sk)
{
const struct iphdr *iph = ip_hdr(skb);
int oif = skb->dev->ifindex;
__build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
}
-static void build_sk_flow_key(struct flowi4 *fl4, struct sock *sk)
+static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const struct ip_options_rcu *inet_opt;
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk),
- daddr, inet->inet_saddr, 0, 0,
- sock_i_uid(sk));
+ daddr, inet->inet_saddr, 0, 0);
rcu_read_unlock();
}
-static void ip_rt_build_flow_key(struct flowi4 *fl4, struct sock *sk,
+static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
const struct sk_buff *skb)
{
if (skb)
nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
goto nla_put_failure;
- if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
- nla_put_u32(skb, RTA_UID,
- from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
- goto nla_put_failure;
-
error = rt->dst.error;
if (rt_is_input_route(rt)) {
int err;
int mark;
struct sk_buff *skb;
- kuid_t uid;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
if (err < 0)
dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
- if (tb[RTA_UID])
- uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
- else
- uid = (iif ? INVALID_UID : current_uid());
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = dst;
fl4.flowi4_tos = rtm->rtm_tos;
fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
fl4.flowi4_mark = mark;
- fl4.flowi4_uid = uid;
if (iif) {
struct net_device *dev;
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
inet_sk_flowi_flags(sk),
(opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
- ireq->loc_addr, th->source, th->dest,
- sock_i_uid(sk));
+ ireq->loc_addr, th->source, th->dest);
security_req_classify_flow(req, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(sock_net(sk), &fl4);
if (IS_ERR(rt)) {
flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
- faddr, saddr, dport, inet->inet_sport,
- sock_i_uid(sk));
+ faddr, saddr, dport, inet->inet_sport);
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet->inet_dport;
fl6.fl6_sport = inet->inet_sport;
- fl6.flowi6_uid = sock_i_uid(sk);
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
final_p = fl6_update_dst(&fl6, np->opt, &final);
if (type == NDISC_REDIRECT)
ip6_redirect(skb, net, 0, 0);
else
- ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
+ ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
}
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet->inet_dport;
fl6.fl6_sport = inet->inet_sport;
- fl6.flowi6_uid = sock_i_uid(sk);
if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
fl6.flowi6_oif = np->mcast_oif;
if (type == NDISC_REDIRECT)
ip6_redirect(skb, net, 0, 0);
else
- ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
+ ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
}
struct net *net = dev_net(skb->dev);
if (type == ICMPV6_PKT_TOOBIG)
- ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
+ ip6_update_pmtu(skb, net, info, 0, 0);
else if (type == NDISC_REDIRECT)
ip6_redirect(skb, net, 0, 0);
fl6->flowi6_mark = inet_rsk(req)->ir_mark;
fl6->fl6_dport = inet_rsk(req)->rmt_port;
fl6->fl6_sport = inet_rsk(req)->loc_port;
- fl6->flowi6_uid = sock_i_uid(sk);
security_req_classify_flow(req, flowi6_to_flowi(fl6));
dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
fl6->flowi6_mark = sk->sk_mark;
fl6->fl6_sport = inet->inet_sport;
fl6->fl6_dport = inet->inet_dport;
- fl6->flowi6_uid = sock_i_uid(sk);
security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
final_p = fl6_update_dst(fl6, np->opt, &final);
if (type == NDISC_REDIRECT)
ip6_redirect(skb, net, 0, 0);
else
- ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
+ ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
}
fl6.saddr = np->saddr;
fl6.daddr = *daddr;
fl6.flowi6_mark = sk->sk_mark;
- fl6.flowi6_uid = sock_i_uid(sk);
fl6.fl6_icmp_type = user_icmph.icmp6_type;
fl6.fl6_icmp_code = user_icmph.icmp6_code;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = sk->sk_mark;
- fl6.flowi6_uid = sock_i_uid(sk);
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
}
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
- int oif, u32 mark, kuid_t uid)
+ int oif, u32 mark)
{
const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
struct dst_entry *dst;
fl6.daddr = iph->daddr;
fl6.saddr = iph->saddr;
fl6.flowlabel = ip6_flowinfo(iph);
- fl6.flowi6_uid = uid;
dst = ip6_route_output(net, NULL, &fl6);
if (!dst->error)
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
ip6_update_pmtu(skb, sock_net(sk), mtu,
- sk->sk_bound_dev_if, sk->sk_mark, sock_i_uid(sk));
+ sk->sk_bound_dev_if, sk->sk_mark);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
[RTA_PRIORITY] = { .type = NLA_U32 },
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
- [RTA_UID] = { .type = NLA_U32 },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[RTA_OIF])
oif = nla_get_u32(tb[RTA_OIF]);
- if (tb[RTA_UID])
- fl6.flowi6_uid = make_kuid(current_user_ns(),
- nla_get_u32(tb[RTA_UID]));
- else
- fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
-
if (iif) {
struct net_device *dev;
int flags = 0;
fl6.flowi6_mark = ireq->ir_mark;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
fl6.fl6_sport = inet_sk(sk)->inet_sport;
- fl6.flowi6_uid = sock_i_uid(sk);
security_req_classify_flow(req, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
- fl6.flowi6_uid = sock_i_uid(sk);
final_p = fl6_update_dst(&fl6, np->opt, &final);
fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
fl6.flowi6_mark = sk->sk_mark;
- fl6.flowi6_uid = sock_i_uid(sk);
if (msg->msg_controllen) {
opt = &opt_space;