Constify various function arguments; this helps produce better optimized code.
Signed-off-by: Thomas Graf <tgraf@noironetworks.com>
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
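
The hunks below all apply the same pattern: pointer parameters that a function only reads are qualified with const, so the read-only contract is visible in the prototype and accidental writes through the pointer become compile errors. A minimal standalone sketch of the idea (not part of the patch; the struct and function names are made up for illustration):

#include <stdio.h>

struct counter {
	unsigned long hits;
	unsigned long misses;
};

/* Before: nothing in the prototype says 'c' is only read. */
static unsigned long counter_total_rw(struct counter *c)
{
	return c->hits + c->misses;
}

/* After: const documents and enforces read-only access; writing
 * through 'c' inside this function is now a compile error. */
static unsigned long counter_total(const struct counter *c)
{
	return c->hits + c->misses;
}

int main(void)
{
	struct counter c = { .hits = 3, .misses = 1 };

	printf("%lu %lu\n", counter_total_rw(&c), counter_total(&c));
	return 0;
}

A side benefit is that callers which themselves only hold a const pointer can pass it straight through without casting the qualifier away.
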
fifo->tail = 0;
}
-static bool action_fifo_is_empty(struct action_fifo *fifo)
+static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
return (fifo->head == fifo->tail);
}
/* Return true if fifo is not full */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
- struct sw_flow_key *key,
+ const struct sw_flow_key *key,
const struct nlattr *attr)
{
struct action_fifo *fifo;
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
- struct sw_flow_actions *acts, struct sw_flow_key *key)
+ const struct sw_flow_actions *acts,
+ struct sw_flow_key *key)
{
int level = this_cpu_read(exec_actions_level);
int err;
return vport->ops->get_name(vport);
}
-static int get_dpifindex(struct datapath *dp)
+static int get_dpifindex(const struct datapath *dp)
{
struct vport *local;
int ifindex;
.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
};
-static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
+static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
struct ovs_dp_megaflow_stats *mega_stats)
{
int i;
/* Called with rcu_read_lock or ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
- struct ovs_header *ovs_header,
+ const struct ovs_header *ovs_header,
struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
struct datapath *dp;
dp->user_features = 0;
}
-static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
+static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
{
if (a[OVS_DP_ATTR_USER_FEATURES])
dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
- struct ovs_header *ovs_header,
+ const struct ovs_header *ovs_header,
struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
struct datapath *dp;
#define rcu_dereference_ovsl(p) \
rcu_dereference_check(p, lockdep_ovsl_is_held())
-static inline struct net *ovs_dp_get_net(struct datapath *dp)
+static inline struct net *ovs_dp_get_net(const struct datapath *dp)
{
return read_pnet(&dp->net);
}
u8 cmd);
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
- struct sw_flow_actions *acts, struct sw_flow_key *);
+ const struct sw_flow_actions *, struct sw_flow_key *);
void ovs_dp_notify_wq(struct work_struct *work);
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
struct flow_stats *stats;
int node = numa_node_id();
return key_extract(skb, key);
}
-int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
struct sk_buff *skb, struct sw_flow_key *key)
{
/* Extract metadata from packet. */
struct ovs_tunnel_info {
struct ovs_key_ipv4_tunnel tunnel;
- struct geneve_opt *options;
+ const struct geneve_opt *options;
u8 options_len;
};
__be16 tp_dst,
__be64 tun_id,
__be16 tun_flags,
- struct geneve_opt *opts,
+ const struct geneve_opt *opts,
u8 opts_len)
{
tun_info->tunnel.tun_id = tun_id;
__be16 tp_dst,
__be64 tun_id,
__be16 tun_flags,
- struct geneve_opt *opts,
+ const struct geneve_opt *opts,
u8 opts_len)
{
__ovs_flow_tun_info_init(tun_info, iph->saddr, iph->daddr,
} __packed;
void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
- struct sk_buff *);
+ const struct sk_buff *);
void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
unsigned long *used, __be16 *tcp_flags);
void ovs_flow_stats_clear(struct sw_flow *);
u64 ovs_flow_used_time(unsigned long flow_jiffies);
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
-int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info, struct sk_buff *skb,
+int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+ struct sk_buff *skb,
struct sw_flow_key *key);
/* Extract key from packet coming from userspace. */
int ovs_flow_key_extract_userspace(const struct nlattr *attr,
return ERR_PTR(-ENOMEM);
}
-int ovs_flow_tbl_count(struct flow_table *table)
+int ovs_flow_tbl_count(const struct flow_table *table)
{
return table->count;
}
}
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
- struct sw_flow_match *match)
+ const struct sw_flow_match *match)
{
struct sw_flow_key *key = match->key;
int key_start = flow_key_start(key);
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
const struct sw_flow_key *unmasked,
- struct sw_flow_mask *mask)
+ const struct sw_flow_mask *mask)
{
struct sw_flow *flow;
struct hlist_head *head;
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
- struct sw_flow_match *match)
+ const struct sw_flow_match *match)
{
struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
struct sw_flow_mask *mask;
/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
- struct sw_flow_mask *new)
+ const struct sw_flow_mask *new)
{
struct sw_flow_mask *mask;
mask = flow_mask_find(tbl, new);
/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
- struct sw_flow_mask *mask)
+ const struct sw_flow_mask *mask)
{
struct table_instance *new_ti = NULL;
struct table_instance *ti;
void ovs_flow_free(struct sw_flow *, bool deferred);
int ovs_flow_tbl_init(struct flow_table *);
-int ovs_flow_tbl_count(struct flow_table *table);
+int ovs_flow_tbl_count(const struct flow_table *table);
void ovs_flow_tbl_destroy(struct flow_table *table);
int ovs_flow_tbl_flush(struct flow_table *flow_table);
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
- struct sw_flow_mask *mask);
+ const struct sw_flow_mask *mask);
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
int ovs_flow_tbl_num_masks(const struct flow_table *table);
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
const struct sw_flow_key *);
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
- struct sw_flow_match *match);
+ const struct sw_flow_match *match);
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
- struct sw_flow_match *match);
+ const struct sw_flow_match *match);
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
const struct sw_flow_mask *mask);
}
/* Convert 24 bit VNI to 64 bit tunnel ID. */
-static __be64 vni_to_tunnel_id(__u8 *vni)
+static __be64 vni_to_tunnel_id(const __u8 *vni)
{
#ifdef __BIG_ENDIAN
return (vni[0] << 16) | (vni[1] << 8) | vni[2];
return RX_HANDLER_CONSUMED;
}
-static struct net_device *get_dpdev(struct datapath *dp)
+static struct net_device *get_dpdev(const struct datapath *dp)
{
struct vport *local;
kfree(dev_table);
}
-static struct hlist_head *hash_bucket(struct net *net, const char *name)
+static struct hlist_head *hash_bucket(const struct net *net, const char *name)
{
unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
*
* Must be called with ovs or RCU read lock.
*/
-struct vport *ovs_vport_locate(struct net *net, const char *name)
+struct vport *ovs_vport_locate(const struct net *net, const char *name)
{
struct hlist_head *bucket = hash_bucket(net, name);
struct vport *vport;
*
* Must be called with ovs_mutex.
*/
-int ovs_vport_set_upcall_portids(struct vport *vport, struct nlattr *ids)
+int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
struct vport_portids *old, *vport_portids;
* skb->data should point to the Ethernet header.
*/
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
- struct ovs_tunnel_info *tun_info)
+ const struct ovs_tunnel_info *tun_info)
{
struct pcpu_sw_netstats *stats;
struct sw_flow_key key;
struct vport *ovs_vport_add(const struct vport_parms *);
void ovs_vport_del(struct vport *);
-struct vport *ovs_vport_locate(struct net *net, const char *name);
+struct vport *ovs_vport_locate(const struct net *net, const char *name);
void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
int ovs_vport_set_options(struct vport *, struct nlattr *options);
int ovs_vport_get_options(const struct vport *, struct sk_buff *);
-int ovs_vport_set_upcall_portids(struct vport *, struct nlattr *pids);
+int ovs_vport_set_upcall_portids(struct vport *, const struct nlattr *pids);
int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
}
void ovs_vport_receive(struct vport *, struct sk_buff *,
- struct ovs_tunnel_info *);
+ const struct ovs_tunnel_info *);
static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)