                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
 {
-        struct net_device *netdev = adapter->netdev;
         struct pci_dev *pdev = adapter->pdev;
         union ixgbe_adv_rx_desc *rx_desc;
         struct ixgbe_rx_buffer *bi;
[...]
                 }
                 if (!bi->skb) {
-                        struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz);
+                        struct sk_buff *skb = netdev_alloc_skb(adapter->netdev,
+                                                               bufsz);
                         if (!skb) {
                                 adapter->alloc_rx_buff_failed++;
[...]
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
 {
-        struct net_device *netdev = adapter->netdev;
         struct pci_dev *pdev = adapter->pdev;
         union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
         struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
[...]
                 total_rx_bytes += skb->len;
                 total_rx_packets++;
-                skb->protocol = eth_type_trans(skb, netdev);
+                skb->protocol = eth_type_trans(skb, adapter->netdev);
                 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
-                netdev->last_rx = jiffies;
+                adapter->netdev->last_rx = jiffies;
 next_desc:
                 rx_desc->wb.upper.status_error = 0;
[...]
         struct ixgbe_q_vector *q_vector =
                                container_of(napi, struct ixgbe_q_vector, napi);
         struct ixgbe_adapter *adapter = q_vector->adapter;
-        struct net_device *netdev = adapter->netdev;
         struct ixgbe_ring *rx_ring = NULL;
         int work_done = 0, i;
         long r_idx;
[...]
         rx_ring = &(adapter->rx_ring[r_idx]);
         /* If all Rx work done, exit the polling mode */
         if (work_done < budget) {
-                netif_rx_complete(netdev, napi);
+                netif_rx_complete(adapter->netdev, napi);
                 if (adapter->itr_setting & 3)
                         ixgbe_set_itr_msix(q_vector);
                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
[...]
         struct ixgbe_q_vector *q_vector = container_of(napi,
                                                        struct ixgbe_q_vector, napi);
         struct ixgbe_adapter *adapter = q_vector->adapter;
-        int tx_cleaned = 0, work_done = 0;
+        int tx_cleaned, work_done = 0;
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
[...]
                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
                         ixgbe_irq_enable(adapter);
         }
-
         return work_done;
 }
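Taken together, the hunks above apply one mechanical change: the function-local `netdev` variable that only cached `adapter->netdev` is removed, and each former use of `netdev` now dereferences `adapter->netdev` directly (the `tx_cleaned` hunk similarly drops an initializer, presumably because the variable is assigned before it is first read). The snippet below is a minimal, self-contained sketch of that pattern in plain C; the `fake_netdev`/`fake_adapter` types and the `refill_rx_ring()` function are invented stand-ins for illustration, not the real ixgbe or netdev structures.

/* Illustrative stand-ins only -- not the kernel's struct net_device
 * or struct ixgbe_adapter. */
#include <stdio.h>

struct fake_netdev {
        unsigned long last_rx;          /* mimics netdev->last_rx */
};

struct fake_adapter {
        struct fake_netdev *netdev;     /* owning struct keeps the pointer */
};

/* Post-change form: no "struct fake_netdev *netdev = adapter->netdev;"
 * alias at the top; every use reads adapter->netdev directly, exactly
 * like the "+" lines in the diff. */
static void refill_rx_ring(struct fake_adapter *adapter, int cleaned_count)
{
        while (cleaned_count--) {
                /* in the driver this spot calls netdev_alloc_skb(adapter->netdev, ...)
                 * and later stamps adapter->netdev->last_rx = jiffies */
                adapter->netdev->last_rx++;
        }
}

int main(void)
{
        struct fake_netdev nd = { .last_rx = 0 };
        struct fake_adapter ad = { .netdev = &nd };

        refill_rx_ring(&ad, 4);
        printf("last_rx bumped to %lu\n", nd.last_rx);
        return 0;
}

The two forms are equivalent as long as `adapter->netdev` is not reassigned while the function runs; the direct form simply re-reads the pointer at each use and saves one local declaration.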