unsigned long tx_dropped;
} ____cacheline_aligned_in_smp;
+#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length. The
 * map is an array of CPUs.
 */
struct netdev_rx_queue *first;
atomic_t count;
} ____cacheline_aligned_in_smp;
+#endif
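The map described above is the classic flexible-array pattern: a fixed header followed by an inline array sized at allocation time. A minimal sketch of that layout and its sizing arithmetic, under illustrative names (the patch's own definitions are not all visible in this excerpt):

/* Sketch of a variable-length CPU map; all names here are illustrative. */
struct example_cpu_map {
	unsigned int len;	/* number of valid entries in cpus[] */
	u16 cpus[0];		/* flexible array, allocated inline */
};

#define EXAMPLE_MAP_SIZE(n) \
	(sizeof(struct example_cpu_map) + (n) * sizeof(u16))

/*
 * One allocation covers both header and array:
 *	map = kzalloc(EXAMPLE_MAP_SIZE(nr), GFP_KERNEL);
 *	map->len = nr;
 */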
/*
 * This structure defines the management hooks for network devices.
 */
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
+#ifdef CONFIG_RPS
struct kset *queues_kset;
struct netdev_rx_queue *_rx;
/* Number of RX queues allocated at alloc_netdev_mq() time */
unsigned int num_rx_queues;
+#endif
struct netdev_queue rx_queue;
DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 */
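In other words, the receive path hashes the flow and uses that hash to index the queue's CPU map, falling back to the local CPU when no map is configured. A simplified sketch of the selection, reusing the illustrative example_cpu_map from above (the real get_rps_cpu must also compute the hash and handle locking, elided here):

/* Sketch: map a 32-bit flow hash onto one of the mapped CPUs. */
static int example_pick_cpu(const struct example_cpu_map *map, u32 hash)
{
	if (!map || !map->len)
		return -1;	/* no map: caller uses the local CPU */

	/* Scale the hash into [0, len) without a divide. */
	return map->cpus[((u64)hash * map->len) >> 32];
}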
/* Schedule NAPI for backlog device */
if (napi_schedule_prep(&queue->backlog)) {
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
if (cpu != smp_processor_id()) {
struct rps_remote_softirq_cpus *rcpus =
&__get_cpu_var(rps_remote_softirq_cpus);
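When the chosen CPU is remote, the skb cannot be handled in this softirq; the target is instead recorded so an IPI can be sent later. A sketch of that bookkeeping, assuming rps_remote_softirq_cpus holds a cpumask selected by the 'select' index seen later in net_rx_action (the mask field name is our assumption):

/* Sketch: remember a remote target for a deferred IPI (names assumed). */
static void example_defer_ipi(struct rps_remote_softirq_cpus *rcpus, int cpu)
{
	/* Mark the target in the mask currently being filled; the IPI
	 * itself goes out later, from net_rps_action(). */
	cpu_set(cpu, rcpus->mask[rcpus->select]);

	/* Make sure net_rx_action runs soon to flush the mask. */
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}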
if (!skb->tstamp.tv64)
net_timestamp(skb);
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
cpu = get_rps_cpu(skb->dev, skb);
if (cpu < 0)
cpu = smp_processor_id();
*/
int netif_receive_skb(struct sk_buff *skb)
{
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
int cpu;
cpu = get_rps_cpu(skb->dev, skb);
}
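Pieced together, the dispatch in both netif_rx and netif_receive_skb follows one pattern: ask get_rps_cpu() for a target, then either process the skb locally or hand it to the target CPU's backlog. A condensed sketch of the netif_receive_skb side, since the excerpt elides the body (__netif_receive_skb and enqueue_to_backlog are assumed helper names):

/* Sketch of the RPS dispatch in netif_receive_skb (helpers assumed). */
int example_netif_receive_skb(struct sk_buff *skb)
{
	int cpu = get_rps_cpu(skb->dev, skb);

	if (cpu < 0)
		return __netif_receive_skb(skb);	/* process here */

	return enqueue_to_backlog(skb, cpu);		/* defer to cpu */
}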
EXPORT_SYMBOL(netif_napi_del);
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
/*
 * net_rps_action sends any pending IPIs for RPS. This is only called from
 * softirq context, and interrupts must be enabled.
 */
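Flushing the pending IPIs then amounts to walking the collected mask and firing the call-single data that init attached to each CPU's backlog queue (see the csd setup at the end of this excerpt). A sketch under those assumptions:

/* Sketch: send one IPI per CPU recorded in 'mask'. */
static void example_net_rps_action(cpumask_t *mask)
{
	int cpu;

	/* Interrupts must be enabled here, per the comment above. */
	for_each_cpu_mask_nr(cpu, *mask) {
		struct softnet_data *queue = &per_cpu(softnet_data, cpu);
		__smp_call_function_single(cpu, &queue->csd, 0);
	}
	cpus_clear(*mask);
}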
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
void *have;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
int select;
struct rps_remote_softirq_cpus *rcpus;
#endif
netpoll_poll_unlock(have);
}
out:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
select = rcpus->select;
rcpus->select ^= 1;
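The select flip above is double buffering: net_rx_action snapshots select, flips it, re-enables interrupts, and drains the old mask while netif_rx keeps filling the new one, so no IPI target is lost and no lock is needed. The per-CPU structure this implies looks roughly as follows (layout inferred from the fragments, not quoted from the patch):

/* Sketch: per-CPU double-buffered IPI masks (inferred layout). */
struct rps_remote_softirq_cpus {
	cpumask_t mask[2];	/* one mask fills while the other drains */
	int select;		/* index of the mask currently filling */
};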
dev->iflink = -1;
+#ifdef CONFIG_RPS
if (!dev->num_rx_queues) {
/*
 * Allocate a single RX queue if the driver never called
 * alloc_netdev_mq().
 */
atomic_set(&dev->_rx->count, 1);
dev->num_rx_queues = 1;
}
-
+#endif
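Legacy drivers allocate their device without alloc_netdev_mq(), so registration backfills one RX queue for them. A condensed sketch of that fallback, consistent with the fragment above (the allocation and error handling are elided in the excerpt and reconstructed here as an assumption):

/* Sketch: backfill one RX queue for legacy single-queue drivers. */
if (!dev->num_rx_queues) {
	dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!dev->_rx)
		return -ENOMEM;

	dev->_rx->first = dev->_rx;	/* element 0 owns the refcount */
	atomic_set(&dev->_rx->count, 1);
	dev->num_rx_queues = 1;
}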
/* Init, if this function is available */
if (dev->netdev_ops->ndo_init) {
ret = dev->netdev_ops->ndo_init(dev);
void (*setup)(struct net_device *), unsigned int queue_count)
{
struct netdev_queue *tx;
- struct netdev_rx_queue *rx;
struct net_device *dev;
size_t alloc_size;
struct net_device *p;
+#ifdef CONFIG_RPS
+ struct netdev_rx_queue *rx;
int i;
+#endif
BUG_ON(strlen(name) >= sizeof(dev->name));
goto free_p;
}
+#ifdef CONFIG_RPS
rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
if (!rx) {
	printk(KERN_ERR "alloc_netdev: Unable to allocate "
	       "rx queues.\n");
	goto free_tx;
}
/*
 * Set a pointer to the first element in the array, which holds the
 * reference count.
 */
for (i = 0; i < queue_count; i++)
	rx[i].first = rx;
+#endif
dev = PTR_ALIGN(p, NETDEV_ALIGN);
dev->padded = (char *)dev - (char *)p;
dev->num_tx_queues = queue_count;
dev->real_num_tx_queues = queue_count;
+#ifdef CONFIG_RPS
dev->_rx = rx;
dev->num_rx_queues = queue_count;
+#endif
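The 'first' pointer set during allocation gives every queue in the kcalloc'd array a link back to element zero, which carries the shared atomic count seen in netdev_rx_queue. One plausible use of that arrangement (the patch's actual release path is not shown in this excerpt):

/* Sketch: any queue reaches the shared refcount via its 'first' link. */
static void example_get_rx_queue(struct netdev_rx_queue *queue)
{
	atomic_inc(&queue->first->count);	/* pin the whole array */
}

static void example_put_rx_queue(struct netdev_rx_queue *queue)
{
	if (atomic_dec_and_test(&queue->first->count))
		kfree(queue->first);		/* last ref frees the array */
}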
dev->gso_max_size = GSO_MAX_SIZE;
return dev;
free_rx:
+#ifdef CONFIG_RPS
kfree(rx);
free_tx:
+#endif
kfree(tx);
free_p:
kfree(p);
queue->completion_queue = NULL;
INIT_LIST_HEAD(&queue->poll_list);
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
queue->csd.func = trigger_softirq;
queue->csd.info = queue;
queue->csd.flags = 0;
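With csd.func and csd.info set at init time, an IPI aimed at this queue runs trigger_softirq() on the target CPU with the queue as its argument, and the natural body is to reschedule that CPU's backlog NAPI. The handler itself is not shown in the excerpt, so this is a sketch:

/* Sketch: IPI handler matching the csd setup above (body assumed). */
static void trigger_softirq(void *data)
{
	struct softnet_data *queue = data;	/* csd.info from init */

	/* Schedule the backlog NAPI on this (the target) CPU. */
	__napi_schedule(&queue->backlog);
}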