*/
struct xps_dev_maps {
struct rcu_head rcu;
- struct xps_map *cpu_map[0];
+ struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
(nr_cpu_ids * sizeof(struct xps_map *)))
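
__rcu is a sparse address-space annotation: once cpu_map[] carries it, a plain load or store of an element is flagged by sparse, and the RCU accessor functions must be used instead. A minimal sketch of the rule it enforces, independent of the XPS code (the names below are illustrative, not from the patch):

#include <linux/rcupdate.h>

struct item {
	int value;
	struct rcu_head rcu;
};

static struct item __rcu *cur_item;		/* annotated pointer */

static int read_value(void)
{
	struct item *p;
	int v = -1;

	rcu_read_lock();
	p = rcu_dereference(cur_item);		/* read-side accessor: ok */
	if (p)
		v = p->value;
	rcu_read_unlock();

	/* "return cur_item->value;" instead would earn a sparse warning */
	return v;
}

static void publish_item(struct item *newp)
{
	rcu_assign_pointer(cur_item, newp);	/* update-side accessor: ok */
}
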
spinlock_t tx_global_lock;
#ifdef CONFIG_XPS
- struct xps_dev_maps *xps_maps;
+ struct xps_dev_maps __rcu *xps_maps;
#endif
/* These may be needed for future network-power-down code. */
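
The same annotation on dev->xps_maps covers the lockless consumer in the transmit path, which walks the table under rcu_read_lock(), roughly along the lines of get_xps_queue() in net/core/dev.c. A simplified sketch (the function name is a placeholder and the queue selection is reduced to a stub; the real code hashes into map->queues[]):

static int xps_pick_queue(struct net_device *dev)
{
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map)
			queue_index = map->queues[0];	/* stand-in for the hash-based pick */
	}
	rcu_read_unlock();

	return queue_index;
}
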
}
static DEFINE_MUTEX(xps_map_mutex);
+#define xmap_dereference(P) \
+ rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
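
rcu_dereference_protected() is the update-side counterpart of rcu_dereference(): it drops the volatile load and ordering that the read-side accessor pays for, and instead asks lockdep to verify the stated condition, here that xps_map_mutex is held. How the accessors divide the work, as an illustrative sketch (not part of the patch):

	/* Reader: inside an RCU read-side critical section. */
	rcu_read_lock();
	maps = rcu_dereference(dev->xps_maps);
	/* ... use maps ... */
	rcu_read_unlock();

	/* Updater: serialized by xps_map_mutex, checked by lockdep. */
	mutex_lock(&xps_map_mutex);
	maps = xmap_dereference(dev->xps_maps);
	/* ... modify or replace maps ... */
	mutex_unlock(&xps_map_mutex);

	/* Pointer-value test only: no dereference, no protection required. */
	if (rcu_access_pointer(dev->xps_maps))
		pr_debug("xps maps installed\n");
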
static ssize_t store_xps_map(struct netdev_queue *queue,
struct netdev_queue_attribute *attribute,
mutex_lock(&xps_map_mutex);
- dev_maps = dev->xps_maps;
+ dev_maps = xmap_dereference(dev->xps_maps);
for_each_possible_cpu(cpu) {
- new_map = map = dev_maps ? dev_maps->cpu_map[cpu] : NULL;
-
+ map = dev_maps ?
+ xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+ new_map = map;
if (map) {
for (pos = 0; pos < map->len; pos++)
if (map->queues[pos] == index)
else
new_map = NULL;
}
- new_dev_maps->cpu_map[cpu] = new_map;
+ RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
}
/* Cleanup old maps */
for_each_possible_cpu(cpu) {
- map = dev_maps ? dev_maps->cpu_map[cpu] : NULL;
- if (map && new_dev_maps->cpu_map[cpu] != map)
+ map = dev_maps ?
+ xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+ if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
call_rcu(&map->rcu, xps_map_release);
if (new_dev_maps->cpu_map[cpu])
nonempty = 1;
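
A replaced per-CPU map cannot be kfree()d on the spot: a transmit-path reader may still be traversing it under rcu_read_lock(). call_rcu() defers the free until a grace period has elapsed; the xps_map_release() callback (its body is not shown in this excerpt) is presumably the usual container_of-and-kfree pattern:

static void xps_map_release(struct rcu_head *rcu)
{
	struct xps_map *map = container_of(rcu, struct xps_map, rcu);

	/* A grace period has elapsed; no RCU reader can still hold this map. */
	kfree(map);
}
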
if (new_dev_maps)
for_each_possible_cpu(i)
- kfree(new_dev_maps->cpu_map[i]);
+ kfree(rcu_dereference_protected(
+ new_dev_maps->cpu_map[i],
+ 1));
kfree(new_dev_maps);
free_cpumask_var(mask);
return -ENOMEM;
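
Two update-side details are worth noting. RCU_INIT_POINTER() stores into an __rcu slot without the write barrier that rcu_assign_pointer() implies, which is legitimate above because new_dev_maps has not been published yet and no reader can reach it; for the same reason the error path can pass the literal condition 1 to rcu_dereference_protected(), since the never-published table is trivially safe to touch. Publication itself (done later in store_xps_map(), outside this excerpt) must use the full-barrier accessor. The generic shape of the pattern, with illustrative names:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct table {
	struct rcu_head rcu;
	void __rcu *entry[4];
};

static struct table __rcu *active_table;
static DEFINE_MUTEX(table_mutex);

static void table_release(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct table, rcu));
}

static void install_table(struct table *newt)
{
	struct table *old;
	int i;

	/* newt is still private: plain RCU_INIT_POINTER() stores suffice. */
	for (i = 0; i < ARRAY_SIZE(newt->entry); i++)
		RCU_INIT_POINTER(newt->entry[i], NULL);

	mutex_lock(&table_mutex);
	old = rcu_dereference_protected(active_table,
					lockdep_is_held(&table_mutex));
	rcu_assign_pointer(active_table, newt);	/* publish: barrier before the store */
	mutex_unlock(&table_mutex);

	if (old)
		call_rcu(&old->rcu, table_release);	/* free after a grace period */
}
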
index = get_netdev_queue_index(queue);
mutex_lock(&xps_map_mutex);
- dev_maps = dev->xps_maps;
+ dev_maps = xmap_dereference(dev->xps_maps);
if (dev_maps) {
for_each_possible_cpu(i) {
- map = dev_maps->cpu_map[i];
+ map = xmap_dereference(dev_maps->cpu_map[i]);
if (!map)
continue;