memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
sizeof(struct ixgbe_ring));
temp_tx_ring[i].count = new_tx_count;
- err = ixgbe_setup_tx_resources(adapter,
- &temp_tx_ring[i]);
+ err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_tx_resources(adapter,
- &temp_tx_ring[i]);
+ ixgbe_free_tx_resources(&temp_tx_ring[i]);
}
goto clear_reset;
}
memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
sizeof(struct ixgbe_ring));
temp_rx_ring[i].count = new_rx_count;
- err = ixgbe_setup_rx_resources(adapter,
- &temp_rx_ring[i]);
+ err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_rx_resources(adapter,
- &temp_rx_ring[i]);
+ ixgbe_free_rx_resources(&temp_rx_ring[i]);
}
goto err_setup;
}
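When setting up the temporary rings above, a failure at index i triggers a
backward walk that frees only the i rings already set up, then jumps to the
error label. A minimal sketch of that unwind idiom, with hypothetical
setup()/teardown() names standing in for the ixgbe calls:

    static int setup_all(struct ring *rings, int n)
    {
        int i, err;

        for (i = 0; i < n; i++) {
            err = setup(&rings[i]);     /* may fail mid-array */
            if (!err)
                continue;
            while (i--)                 /* free rings i-1..0 */
                teardown(&rings[i]);
            return err;
        }
        return 0;
    }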
/* tx */
if (new_tx_count != adapter->tx_ring_count) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- ixgbe_free_tx_resources(adapter,
- adapter->tx_ring[i]);
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
sizeof(struct ixgbe_ring));
}
/* rx */
if (new_rx_count != adapter->rx_ring_count) {
for (i = 0; i < adapter->num_rx_queues; i++) {
- ixgbe_free_rx_resources(adapter,
- adapter->rx_ring[i]);
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
sizeof(struct ixgbe_ring));
}
ixgbe_reset(adapter);
- ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring);
- ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring);
+ ixgbe_free_tx_resources(&adapter->test_tx_ring);
+ ixgbe_free_rx_resources(&adapter->test_rx_ring);
}
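All of the ethtool hunks above track a single interface change, made to the
setup/free helpers later in this patch. Side by side, the Tx prototypes (Rx
is analogous):

    /* before: helpers needed the adapter to reach the PCI device */
    int  ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
                                  struct ixgbe_ring *tx_ring);
    void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
                                 struct ixgbe_ring *tx_ring);

    /* after: the ring carries its own struct device *dev */
    int  ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring);
    void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring);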
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
tx_ring->count = IXGBE_DEFAULT_TXD;
tx_ring->queue_index = 0;
+ tx_ring->dev = &adapter->pdev->dev;
tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
tx_ring->numa_node = adapter->node;
- err = ixgbe_setup_tx_resources(adapter, tx_ring);
+ err = ixgbe_setup_tx_resources(tx_ring);
if (err)
return 1;
/* Setup Rx Descriptor ring and Rx buffers */
rx_ring->count = IXGBE_DEFAULT_RXD;
rx_ring->queue_index = 0;
+ rx_ring->dev = &adapter->pdev->dev;
rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
rx_ring->numa_node = adapter->node;
- err = ixgbe_setup_rx_resources(adapter, rx_ring);
+ err = ixgbe_setup_rx_resources(rx_ring);
if (err) {
ret_val = 4;
goto err_nomem;
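The two new ->dev assignments are the load-bearing additions in this hunk:
once the setup helpers stop taking the adapter, a hand-built ring such as
these self-test rings must carry the device pointer itself, or the
dma_alloc_coherent() inside setup would be handed a NULL device. A minimal
sketch of the precondition (local ring shown for illustration; the driver
uses adapter->test_tx_ring):

    struct ixgbe_ring tx_ring = { 0 };

    tx_ring.count = IXGBE_DEFAULT_TXD;
    tx_ring.dev   = &adapter->pdev->dev;   /* must be set before setup */
    if (ixgbe_setup_tx_resources(&tx_ring))
        return 1;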
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
/* unmap Rx buffer, will be remapped by alloc_rx_buffers */
- dma_unmap_single(&adapter->pdev->dev,
+ dma_unmap_single(rx_ring->dev,
rx_buffer_info->dma,
bufsz,
DMA_FROM_DEVICE);
/* unmap buffer on Tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment Rx/Tx next to clean counters */
rx_ntc++;
}
}
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
- struct ixgbe_tx_buffer
- *tx_buffer_info)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *tx_buffer_info)
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_page(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
else
- dma_unmap_single(&adapter->pdev->dev,
+ dma_unmap_single(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
total_packets += tx_buffer_info->gso_segs;
}
- ixgbe_unmap_and_free_tx_resource(adapter,
+ ixgbe_unmap_and_free_tx_resource(tx_ring,
tx_buffer_info);
}
struct ixgbe_ring *rx_ring,
u16 cleaned_count)
{
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
struct sk_buff *skb;
}
if (!bi->dma) {
- bi->dma = dma_map_single(&pdev->dev,
+ bi->dma = dma_map_single(rx_ring->dev,
skb->data,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev, bi->dma)) {
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
adapter->alloc_rx_buff_failed++;
bi->dma = 0;
goto no_buffers;
if (!bi->page_dma) {
/* use a half page if we're re-using */
bi->page_offset ^= PAGE_SIZE / 2;
- bi->page_dma = dma_map_page(&pdev->dev,
+ bi->page_dma = dma_map_page(rx_ring->dev,
bi->page,
bi->page_offset,
PAGE_SIZE / 2,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev,
+ if (dma_mapping_error(rx_ring->dev,
bi->page_dma)) {
adapter->alloc_rx_page_failed++;
bi->page_dma = 0;
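For context on the bi->page_offset ^= PAGE_SIZE / 2 line above: each Rx page
is used in two halves that take turns backing a buffer, and the XOR flips
between them on reuse. A worked sketch, assuming 4 KiB pages:

    unsigned int off = 0;       /* first half of the page    */

    off ^= PAGE_SIZE / 2;       /* 0    -> 2048: second half */
    off ^= PAGE_SIZE / 2;       /* 2048 -> 0:    first half  */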
int *work_done, int work_to_do)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
struct sk_buff *skb;
IXGBE_RSC_CB(skb)->delay_unmap = true;
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
} else {
- dma_unmap_single(&pdev->dev,
+ dma_unmap_single(rx_ring->dev,
rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
}
if (upper_len) {
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_ring->dev,
+ rx_buffer_info->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
&(rx_ring->rsc_count));
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
if (IXGBE_RSC_CB(skb)->delay_unmap) {
- dma_unmap_single(&pdev->dev,
+ dma_unmap_single(rx_ring->dev,
IXGBE_RSC_CB(skb)->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = rx_ring->dev;
unsigned long size;
- unsigned int i;
+ u16 i;
/* ring already cleared, nothing to do */
if (!rx_ring->rx_buffer_info)
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ dma_unmap_single(dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
do {
struct sk_buff *this = skb;
if (IXGBE_RSC_CB(this)->delay_unmap) {
- dma_unmap_single(&pdev->dev,
+ dma_unmap_single(dev,
IXGBE_RSC_CB(this)->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
if (!rx_buffer_info->page)
continue;
if (rx_buffer_info->page_dma) {
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ dma_unmap_page(dev, rx_buffer_info->page_dma,
PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
}
/**
* ixgbe_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned long size;
- unsigned int i;
+ u16 i;
/* ring already cleared, nothing to do */
if (!tx_ring->tx_buffer_info)
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
+ ixgbe_clean_rx_ring(adapter->rx_ring[i]);
}
/**
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
+ ixgbe_clean_tx_ring(adapter->tx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
int i;
+ int rx_count;
int orig_node = adapter->node;
for (i = 0; i < adapter->num_tx_queues; i++) {
goto err_tx_ring_allocation;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
+ ring->dev = &adapter->pdev->dev;
ring->numa_node = adapter->node;
adapter->tx_ring[i] = ring;
/* Restore the adapter's original node */
adapter->node = orig_node;
+ rx_count = adapter->rx_ring_count;
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
if (orig_node == -1) {
ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
if (!ring)
goto err_rx_ring_allocation;
- ring->count = adapter->rx_ring_count;
+ ring->count = rx_count;
ring->queue_index = i;
+ ring->dev = &adapter->pdev->dev;
ring->numa_node = adapter->node;
adapter->rx_ring[i] = ring;
/**
* ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
* @tx_ring: tx descriptor ring (for a specific queue) to setup
*
* Return 0 on success, negative on failure
**/
-int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = tx_ring->dev;
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
err:
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
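A quick worked example of the 4 KiB rounding above, assuming the usual
16-byte advanced descriptor (sizeof(union ixgbe_adv_tx_desc) == 16):

    /* default 512 descriptors: 512 * 16 = 8192, already 4 KiB aligned  */
    /* a count of 100:          100 * 16 = 1600, ALIGN(1600, 4096) = 4096 */
    tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
    tx_ring->size = ALIGN(tx_ring->size, 4096);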
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
+ err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
e_err(probe, "Allocation for Tx Queue %u failed\n", i);
/**
* ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
* @rx_ring: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
-int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = rx_ring->dev;
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+ rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
if (!rx_ring->rx_buffer_info)
rx_ring->rx_buffer_info = vmalloc(size);
- if (!rx_ring->rx_buffer_info) {
- e_err(probe, "vmalloc allocation failed for the Rx "
- "descriptor ring\n");
- goto alloc_failed;
- }
+ if (!rx_ring->rx_buffer_info)
+ goto err;
memset(rx_ring->rx_buffer_info, 0, size);
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
- if (!rx_ring->desc) {
- e_err(probe, "Memory allocation failed for the Rx "
- "descriptor ring\n");
- vfree(rx_ring->rx_buffer_info);
- goto alloc_failed;
- }
+ if (!rx_ring->desc)
+ goto err;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
return 0;
-
-alloc_failed:
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
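Folding both Rx failure paths into the single err: label works because
vfree(NULL) is a no-op, so the label is safe to reach whether the vmalloc or
the dma_alloc_coherent() failed; it also makes the routine mirror its Tx
counterpart. Condensed to its shape, with generic names:

    buf = vmalloc_node(size, node);
    if (!buf)
        goto err;
    desc = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
    if (!desc)
        goto err;
    return 0;
    err:
    vfree(buf);                 /* harmless when buf is NULL */
    buf = NULL;
    return -ENOMEM;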
*
* Return 0 on success, negative on failure
**/
-
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
if (!err)
continue;
e_err(probe, "Allocation for Rx Queue %u failed\n", i);
/**
* ixgbe_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
* @tx_ring: Tx descriptor ring for a specific queue
*
* Free all transmit software resources
**/
-void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbe_clean_tx_ring(adapter, tx_ring);
+ ixgbe_clean_tx_ring(tx_ring);
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tx_ring[i]->desc)
- ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
}
/**
* ixgbe_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
* @rx_ring: ring to clean the resources from
*
* Free all receive software resources
**/
-void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbe_clean_rx_ring(adapter, rx_ring);
+ ixgbe_clean_rx_ring(rx_ring);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
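With the new if (!...->desc) guards, both free routines are now safe to call
on rings whose descriptor memory was never allocated: ixgbe_clean_*_ring()
already bails out when buffer_info is NULL, and vfree(NULL) is harmless.
That arguably leaves the ->desc checks at the call sites redundant; a
hypothetical simplification, not part of this patch:

    for (i = 0; i < adapter->num_rx_queues; i++)
        ixgbe_free_rx_resources(adapter->rx_ring[i]);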
for (i = 0; i < adapter->num_rx_queues; i++)
if (adapter->rx_ring[i]->desc)
- ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
}
/**
struct sk_buff *skb, u32 tx_flags,
unsigned int first, const u8 hdr_len)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = tx_ring->dev;
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(&pdev->dev,
+ tx_buffer_info->dma = dma_map_single(dev,
skb->data + offset,
size, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
+ tx_buffer_info->dma = dma_map_page(dev,
frag->page,
offset, size,
DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
if (i == 0)
	i += tx_ring->count;
i--;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
return 0;
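One last note on the dma_error unwind above: adding tx_ring->count before
the decrement keeps the index in range when the backward walk crosses slot
0. A hypothetical helper that makes the arithmetic explicit:

    /* Previous slot index on a ring of 'count' entries; equivalent to
     * the guarded decrement in the unwind loop above. */
    static inline u16 ring_prev(u16 i, u16 count)
    {
        if (i == 0)
            i += count;
        return i - 1;
    }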