/* Is the affinity hint set for virtqueues? */
bool affinity_hint_set;
+
+ /* Per-cpu variable holding the mapping from CPU to virtqueue index */
+ int __percpu *vq_index;
};
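
For context on the new field: vq_index is dynamic per-cpu storage, allocated with alloc_percpu() later in this patch. A minimal standalone sketch of that API (illustrative only, not driver code; needs <linux/percpu.h> and <linux/cpumask.h>):

	static int vq_index_sketch(void)
	{
		int __percpu *idx;
		int cpu;

		idx = alloc_percpu(int);	/* one int per possible CPU */
		if (!idx)
			return -ENOMEM;

		for_each_online_cpu(cpu)	/* write every online CPU's copy */
			*per_cpu_ptr(idx, cpu) = -1;

		free_percpu(idx);
		return 0;
	}
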
struct skb_vnet_hdr {
static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
{
int i;
+ int cpu;
/* In multiqueue mode, when the number of cpus is equal to the number of
 * queue pairs, we let the queue pairs be private to one cpu by
 * setting the affinity hint to eliminate the contention.
 */
if ((vi->curr_queue_pairs == 1 ||
vi->max_queue_pairs != num_online_cpus()) && set) {
- if (vi->affinity_hint_set)
+ if (vi->affinity_hint_set) {
set = false;
- else
+ } else {
+ i = 0;
+ for_each_online_cpu(cpu)
+ *per_cpu_ptr(vi->vq_index, cpu) =
+ ++i % vi->curr_queue_pairs;
return;
+ }
}
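
Note the pre-increment in the fallback path above: the first online CPU is mapped to queue 1 % curr_queue_pairs, not 0. A standalone sketch with hypothetical sizes (4 online CPUs, 2 queue pairs):

	static void fallback_map_sketch(void)
	{
		int i = 0, cpu, map[4];

		for (cpu = 0; cpu < 4; cpu++)
			map[cpu] = ++i % 2;	/* yields {1, 0, 1, 0}: every CPU gets a valid queue */
	}
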
- for (i = 0; i < vi->max_queue_pairs; i++) {
- int cpu = set ? i : -1;
- virtqueue_set_affinity(vi->rq[i].vq, cpu);
- virtqueue_set_affinity(vi->sq[i].vq, cpu);
- }
+ if (set) {
+ i = 0;
+ for_each_online_cpu(cpu) {
+ virtqueue_set_affinity(vi->rq[i].vq, cpu);
+ virtqueue_set_affinity(vi->sq[i].vq, cpu);
+ *per_cpu_ptr(vi->vq_index, cpu) = i;
+ i++;
+ }
- if (set)
vi->affinity_hint_set = true;
- else
+ } else {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtqueue_set_affinity(vi->rq[i].vq, -1);
+ virtqueue_set_affinity(vi->sq[i].vq, -1);
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu)
+ *per_cpu_ptr(vi->vq_index, cpu) =
+ ++i % vi->curr_queue_pairs;
+
vi->affinity_hint_set = false;
+ }
}
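
The switch from indexing by queue number to for_each_online_cpu() matters when CPU IDs are not consecutive: with the old `cpu = set ? i : -1` form, a hole in the online mask (say cpu1 offline while cpu0 and cpu2 are up) would leave a queue hinted at an offline CPU. A standalone sketch showing that the online mask can have holes (hypothetical helper name):

	static void online_ids_sketch(void)
	{
		int cpu;

		for_each_online_cpu(cpu)
			pr_info("online cpu: %d\n", cpu);	/* may print 0, 2, 3, ... */
	}
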
static void virtnet_get_ringparam(struct net_device *dev,
if (queue_pairs > vi->max_queue_pairs)
return -EINVAL;
+ get_online_cpus();
err = virtnet_set_queues(vi, queue_pairs);
if (!err) {
netif_set_real_num_tx_queues(dev, queue_pairs);
virtnet_set_affinity(vi, true);
}
+ put_online_cpus();
return err;
}
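
The get_online_cpus()/put_online_cpus() bracket pins the online mask so virtnet_set_affinity() cannot race with CPU hotplug. The general pattern, as a hedged sketch (do_cpu_walk() is a hypothetical stand-in for the guarded work):

	static void hotplug_safe_sketch(struct virtnet_info *vi)
	{
		get_online_cpus();	/* hold off CPU hotplug ... */
		do_cpu_walk(vi);	/* ... while walking cpu_online_mask */
		put_online_cpus();
	}
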
/* To avoid contending a lock held by a vcpu that would exit to host, select
 * the txq based on the processor id.
- * TODO: handle cpu hotplug.
*/
static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
{
- int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
- smp_processor_id();
+ int txq;
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ if (skb_rx_queue_recorded(skb)) {
+ txq = skb_get_rx_queue(skb);
+ } else {
+ txq = *__this_cpu_ptr(vi->vq_index);
+ if (txq == -1)
+ txq = 0;
+ }
while (unlikely(txq >= dev->real_num_tx_queues))
txq -= dev->real_num_tx_queues;
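
The while loop is a cheap modulo: a recorded rx queue (or a stale per-cpu index) may exceed real_num_tx_queues, and repeated subtraction folds it back into range. A standalone sketch of the same reduction:

	static u16 fold_txq_sketch(u16 txq, u16 nr_txq)
	{
		while (txq >= nr_txq)
			txq -= nr_txq;	/* same result as txq % nr_txq */
		return txq;		/* e.g. fold_txq_sketch(5, 2) == 1 */
	}
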
if (ret)
goto err_free;
+ get_online_cpus();
virtnet_set_affinity(vi, true);
+ put_online_cpus();
+
return 0;
err_free:
if (vi->stats == NULL)
goto free;
+ vi->vq_index = alloc_percpu(int);
+ if (vi->vq_index == NULL)
+ goto free_stats;
+
mutex_init(&vi->config_lock);
vi->config_enable = true;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
- goto free_stats;
+ goto free_index;
netif_set_real_num_tx_queues(dev, 1);
netif_set_real_num_rx_queues(dev, 1);
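
Since probe now acquires one more resource, the unwind ladder gains a free_index rung between init_vqs failure and free_stats. The shape of that pattern, as a minimal sketch with illustrative names:

	static int probe_unwind_sketch(void)
	{
		int __percpu *stats, *index;

		stats = alloc_percpu(int);
		if (!stats)
			return -ENOMEM;

		index = alloc_percpu(int);
		if (!index)
			goto free_stats;	/* undo only what was acquired before */

		return 0;			/* success: freed later, on remove */

	free_stats:
		free_percpu(stats);
		return -ENOMEM;
	}
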
free_vqs:
cancel_delayed_work_sync(&vi->refill);
virtnet_del_vqs(vi);
+free_index:
+ free_percpu(vi->vq_index);
free_stats:
free_percpu(vi->stats);
free:
flush_work(&vi->config_work);
+ free_percpu(vi->vq_index);
free_percpu(vi->stats);
free_netdev(vi->dev);
}