nfp_net_dma_map_rx(struct nfp_net *nn, void *frag, unsigned int bufsz,
int direction)
{
- return dma_map_single(&nn->pdev->dev, frag + NFP_NET_RX_BUF_HEADROOM,
+ return dma_map_single(nn->dev, frag + NFP_NET_RX_BUF_HEADROOM,
bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
}
nfp_net_dma_unmap_rx(struct nfp_net *nn, dma_addr_t dma_addr,
unsigned int bufsz, int direction)
{
- dma_unmap_single(&nn->pdev->dev, dma_addr,
+ dma_unmap_single(nn->dev, dma_addr,
bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
}
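
Aside on the two helpers above: the DMA API requires every streaming map to be checked with dma_mapping_error() before the address is used, and to be unmapped with the same size and direction it was mapped with, which is why both helpers apply the same NFP_NET_RX_BUF_NON_DATA adjustment. A minimal sketch of a caller, hypothetical and not part of the patch, assuming nn->dev is set as in the probe hunk near the end:

	static void *example_rx_map(struct nfp_net *nn, void *frag,
				    unsigned int bufsz, dma_addr_t *addr)
	{
		*addr = nfp_net_dma_map_rx(nn, frag, bufsz, DMA_FROM_DEVICE);
		if (dma_mapping_error(nn->dev, *addr))
			return NULL;	/* mapping failed; caller frees frag */
		return frag;
	}
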
}
/* Start with the head skbuf */
- dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb),
+ dma_addr = dma_map_single(nn->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
- if (dma_mapping_error(&nn->pdev->dev, dma_addr))
+ if (dma_mapping_error(nn->dev, dma_addr))
goto err_free;
wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
frag = &skb_shinfo(skb)->frags[f];
fsize = skb_frag_size(frag);
- dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0,
+ dma_addr = skb_frag_dma_map(nn->dev, frag, 0,
fsize, DMA_TO_DEVICE);
- if (dma_mapping_error(&nn->pdev->dev, dma_addr))
+ if (dma_mapping_error(nn->dev, dma_addr))
goto err_unmap;
wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1);
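
The two mapping hunks above follow the usual skb geometry: the linear head goes through dma_map_single(), each page fragment through skb_frag_dma_map(), and every map gets its own dma_mapping_error() check. An abridged sketch of the shape, not the patch itself:

	dma_addr = dma_map_single(nn->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dev, dma_addr))
		goto err_free;
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		frag = &skb_shinfo(skb)->frags[f];
		dma_addr = skb_frag_dma_map(nn->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(nn->dev, dma_addr))
			goto err_unmap;		/* unwind below */
	}
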
--f;
while (f >= 0) {
frag = &skb_shinfo(skb)->frags[f];
- dma_unmap_page(&nn->pdev->dev,
- tx_ring->txbufs[wr_idx].dma_addr,
+ dma_unmap_page(nn->dev, tx_ring->txbufs[wr_idx].dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring->txbufs[wr_idx].dma_addr = 0;
f--;
wr_idx--;
if (wr_idx < 0)
wr_idx += tx_ring->cnt;
}
- dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr,
+ dma_unmap_single(nn->dev, tx_ring->txbufs[wr_idx].dma_addr,
skb_headlen(skb), DMA_TO_DEVICE);
tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring->txbufs[wr_idx].dma_addr = 0;
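
Note the two wrap styles in this unwind: the forward path advances with a mask, wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1), which assumes the ring size is a power of two, while the error path steps backwards and re-wraps by hand:

	wr_idx--;
	if (wr_idx < 0)
		wr_idx += tx_ring->cnt;	/* e.g. -1 becomes cnt - 1 */

Both schemes index the same slots; the masked form simply cannot go negative.
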
if (fidx == -1) {
/* unmap head */
- dma_unmap_single(&nn->pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
+ dma_unmap_single(nn->dev, tx_ring->txbufs[idx].dma_addr,
skb_headlen(skb), DMA_TO_DEVICE);
done_pkts += tx_ring->txbufs[idx].pkt_cnt;
} else {
/* unmap fragment */
frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(&nn->pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
+ dma_unmap_page(nn->dev, tx_ring->txbufs[idx].dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
}
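
The fidx tag consulted above encodes which unmap primitive each TX buffer needs, matching the DMA-API pairing rules:

	mapped with                      unmapped with        tag
	dma_map_single() (skb head)      dma_unmap_single()   fidx == -1
	skb_frag_dma_map() (page frag)   dma_unmap_page()     fidx == frag index

Mixing the pairs up is a DMA-API violation that CONFIG_DMA_API_DEBUG kernels will flag.
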
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
const struct skb_frag_struct *frag;
- struct pci_dev *pdev = nn->pdev;
struct netdev_queue *nd_q;
while (tx_ring->rd_p != tx_ring->wr_p) {
if (tx_buf->fidx == -1) {
/* unmap head */
- dma_unmap_single(&pdev->dev, tx_buf->dma_addr,
+ dma_unmap_single(nn->dev, tx_buf->dma_addr,
skb_headlen(skb),
DMA_TO_DEVICE);
} else {
/* unmap fragment */
frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
- dma_unmap_page(&pdev->dev, tx_buf->dma_addr,
+ dma_unmap_page(nn->dev, tx_buf->dma_addr,
skb_frag_size(frag),
DMA_TO_DEVICE);
}
direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
*dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, direction);
- if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
+ if (dma_mapping_error(nn->dev, *dma_addr)) {
nfp_net_free_frag(frag, xdp);
nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
return NULL;
}
*dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, direction);
- if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
+ if (dma_mapping_error(nn->dev, *dma_addr)) {
nfp_net_free_frag(frag, nn->xdp_prog);
nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
return NULL;
txbuf->pkt_cnt = 1;
txbuf->real_len = pkt_len;
- dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off,
+ dma_sync_single_for_device(nn->dev, rxbuf->dma_addr + pkt_off,
pkt_len, DMA_BIDIRECTIONAL);
/* Build TX descriptor */
nn->bpf_offload_xdp)) {
int act;
- dma_sync_single_for_cpu(&nn->pdev->dev,
+ dma_sync_single_for_cpu(nn->dev,
rxbuf->dma_addr + pkt_off,
pkt_len, DMA_BIDIRECTIONAL);
act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off,
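
These two sync hunks are the halves of the CPU/device ownership handshake on the XDP path, and they only work because the RX buffer was mapped DMA_BIDIRECTIONAL when an XDP program is attached (the direction selection hunk above). In execution order, abridged and with the surrounding control flow simplified:

	/* hand the packet bytes to the CPU before the BPF program reads them */
	dma_sync_single_for_cpu(nn->dev, rxbuf->dma_addr + pkt_off,
				pkt_len, DMA_BIDIRECTIONAL);
	act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off, ...);
	if (act == XDP_TX)
		/* hand the buffer back to the device before it DMAs it out */
		dma_sync_single_for_device(nn->dev, rxbuf->dma_addr + pkt_off,
					   pkt_len, DMA_BIDIRECTIONAL);
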
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
kfree(tx_ring->txbufs);
if (tx_ring->txds)
- dma_free_coherent(&pdev->dev, tx_ring->size,
+ dma_free_coherent(nn->dev, tx_ring->size,
tx_ring->txds, tx_ring->dma);
tx_ring->cnt = 0;
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
int sz;
tx_ring->cnt = cnt;
tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
- tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
+ tx_ring->txds = dma_zalloc_coherent(nn->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->txds)
goto err_alloc;
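
For the descriptor rings (this hunk and the RX twin below) the driver uses coherent DMA memory rather than streaming mappings: the NIC and the CPU both touch the descriptors continuously, so memory that needs no sync calls is the right tool even if it ends up uncached on some platforms. The lifecycle, as a sketch with ring standing in for tx_ring/rx_ring:

	ring->size = sizeof(*ring->txds) * cnt;
	ring->txds = dma_zalloc_coherent(nn->dev, ring->size,
					 &ring->dma, GFP_KERNEL);
	if (!ring->txds)	/* on success: CPU va, plus bus address in ring->dma */
		goto err_alloc;
	/* ... CPU writes ring->txds, the NIC is told ring->dma ... */
	dma_free_coherent(nn->dev, ring->size, ring->txds, ring->dma);
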
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
kfree(rx_ring->rxbufs);
if (rx_ring->rxds)
- dma_free_coherent(&pdev->dev, rx_ring->size,
+ dma_free_coherent(nn->dev, rx_ring->size,
rx_ring->rxds, rx_ring->dma);
rx_ring->cnt = 0;
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
int sz;
rx_ring->cnt = cnt;
rx_ring->bufsz = fl_bufsz;
rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
- rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
+ rx_ring->rxds = dma_zalloc_coherent(nn->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->rxds)
goto err_alloc;
nn = netdev_priv(netdev);
nn->netdev = netdev;
+ nn->dev = &pdev->dev;
nn->pdev = pdev;
nn->max_tx_rings = max_tx_rings;
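
This assignment is the point of the whole patch: cache &pdev->dev once at probe time so the fast path dereferences one pointer (nn->dev) instead of chasing nn->pdev->dev on every map, unmap and sync. The matching structure change is not shown in these hunks; presumably it is simply:

	struct nfp_net {
		struct device *dev;	/* cached &pdev->dev for the fast path */
		struct pci_dev *pdev;
		/* ... */
	};
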
func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
- dev_warn(&nn->pdev->dev,
+ dev_warn(nn->dev,
"Bad RSS config, defaulting to Toeplitz hash\n");
func_bit = ETH_RSS_HASH_TOP_BIT;
}
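
Not a DMA call, but converted for the same reason: dev_warn() takes a struct device *, so the cached pointer drops straight in. As for the logic, find_first_bit() returns its size argument when no bit is set, so the comparison doubles as the "no supported hash function advertised" test:

	unsigned long rss_cap_hfunc = 0;	/* device advertises nothing */

	func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
	/* func_bit == NFP_NET_CFG_RSS_HFUNCS here, so warn and fall back
	 * to ETH_RSS_HASH_TOP_BIT (Toeplitz) */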