/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * Netronome network device driver: Common functions between PF and VF
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 *          Chris Telfer <chris.telfer@netronome.com>
 */

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/ipv6.h>
#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/ktime.h>

#include <net/switchdev.h>
#include <net/vxlan.h>

#include "nfpcore/nfp_nsp.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_sriov.h"
/**
 * nfp_net_get_fw_version() - Read and parse the FW version
 * @fw_ver:   Output fw_version structure to read to
 * @ctrl_bar: Mapped address of the control BAR
 */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
                            void __iomem *ctrl_bar)
        reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
        put_unaligned_le32(reg, fw_ver);

static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
        return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
                                    dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
                                    dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
        dma_sync_single_for_device(dp->dev, dma_addr,
                                   dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
                                   dp->rx_dma_dir);

static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
        dma_unmap_single_attrs(dp->dev, dma_addr,
                               dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
                               dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
                                    unsigned int len)
        dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
                                len, dp->rx_dma_dir);
/* Firmware reconfig may take a while so we have two versions of it -
 * synchronous and asynchronous (posted). All synchronous callers are holding
 * RTNL so we don't have to worry about serializing them.
 */
static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
        nn_writel(nn, NFP_NET_CFG_UPDATE, update);
        /* ensure update is written before pinging HW */
        nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);

/* Pass 0 as update to run posted reconfigs. */
static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
        update |= nn->reconfig_posted;
        nn->reconfig_posted = 0;
        nfp_net_reconfig_start(nn, update);
        nn->reconfig_timer_active = true;
        mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);

static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
        reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
        if (reg & NFP_NET_CFG_UPDATE_ERR) {
                nn_err(nn, "Reconfig error: 0x%08x\n", reg);
        } else if (last_check) {
                nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);

static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
        bool timed_out = false;

        /* Poll update field, waiting for NFP to ack the config */
        while (!nfp_net_reconfig_check_done(nn, timed_out)) {
                timed_out = time_is_before_eq_jiffies(deadline);

        if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)

        return timed_out ? -EIO : 0;

static void nfp_net_reconfig_timer(unsigned long data)
        struct nfp_net *nn = (void *)data;

        spin_lock_bh(&nn->reconfig_lock);
        nn->reconfig_timer_active = false;
        /* If sync caller is present it will take over from us */
        if (nn->reconfig_sync_present)

        /* Read reconfig status and report errors */
        nfp_net_reconfig_check_done(nn, true);

        if (nn->reconfig_posted)
                nfp_net_reconfig_start_async(nn, 0);

        spin_unlock_bh(&nn->reconfig_lock);

/**
 * nfp_net_reconfig_post() - Post async reconfig request
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Record FW reconfiguration request.  Reconfiguration will be kicked off
 * whenever reconfiguration machinery is idle.  Multiple requests can be
 * merged together.
 */
static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
        spin_lock_bh(&nn->reconfig_lock);

        /* Sync caller will kick off async reconf when it's done, just post */
        if (nn->reconfig_sync_present) {
                nn->reconfig_posted |= update;

        /* Opportunistically check if the previous command is done */
        if (!nn->reconfig_timer_active ||
            nfp_net_reconfig_check_done(nn, false))
                nfp_net_reconfig_start_async(nn, update);
        else
                nn->reconfig_posted |= update;

        spin_unlock_bh(&nn->reconfig_lock);
/**
 * nfp_net_reconfig() - Reconfigure the firmware
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue.  Then
 * poll until the firmware has acknowledged the update by zeroing the
 * update word.
 *
 * Return: Negative errno on error, 0 on success
 */
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
        bool cancelled_timer = false;
        u32 pre_posted_requests;

        spin_lock_bh(&nn->reconfig_lock);

        nn->reconfig_sync_present = true;

        if (nn->reconfig_timer_active) {
                del_timer(&nn->reconfig_timer);
                nn->reconfig_timer_active = false;
                cancelled_timer = true;

        pre_posted_requests = nn->reconfig_posted;
        nn->reconfig_posted = 0;

        spin_unlock_bh(&nn->reconfig_lock);

        nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);

        /* Run the posted reconfigs which were issued before we started */
        if (pre_posted_requests) {
                nfp_net_reconfig_start(nn, pre_posted_requests);
                nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);

        nfp_net_reconfig_start(nn, update);
        ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);

        spin_lock_bh(&nn->reconfig_lock);

        if (nn->reconfig_posted)
                nfp_net_reconfig_start_async(nn, 0);

        nn->reconfig_sync_present = false;

        spin_unlock_bh(&nn->reconfig_lock);
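/* Usage sketch (illustrative, not a caller from this file): configuration
 * updates generally write the new value into the control BAR and then kick
 * the firmware synchronously, e.g.
 *
 *      nn_writel(nn, NFP_NET_CFG_MTU, new_mtu);
 *      err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
 *
 * NFP_NET_CFG_MTU / NFP_NET_CFG_UPDATE_GEN are used here only as plausible
 * register/update names; the point is the write-then-reconfig pattern done
 * while holding RTNL.
 */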
/**
 * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
 * @nn:        NFP Net device to reconfigure
 * @mbox_cmd:  The value for the mailbox command
 *
 * Helper function for mailbox updates
 *
 * Return: Negative errno on error, 0 on success
 */
static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
        nn_writeq(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd);

        ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
                nn_err(nn, "Mailbox update error\n");

        return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);

/* Interrupt configuration and handling
 */

/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the ICR for the IRQ entry.
 */
static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
        nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
/**
 * nfp_net_irqs_alloc() - allocates MSI-X irqs
 * @pdev:        PCI device structure
 * @irq_entries: Array to be initialized and used to hold the irq entries
 * @min_irqs:    Minimal acceptable number of interrupts
 * @wanted_irqs: Target number of interrupts to allocate
 *
 * Return: Number of irqs obtained or 0 on error.
 */
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
                   unsigned int min_irqs, unsigned int wanted_irqs)
        for (i = 0; i < wanted_irqs; i++)
                irq_entries[i].entry = i;

        got_irqs = pci_enable_msix_range(pdev, irq_entries,
                                         min_irqs, wanted_irqs);
                dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
                        min_irqs, wanted_irqs, got_irqs);

        if (got_irqs < wanted_irqs)
                dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
                         wanted_irqs, got_irqs);

/**
 * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
 * @nn:          NFP Network structure
 * @irq_entries: Table of allocated interrupts
 * @n:           Size of @irq_entries (number of entries to grab)
 *
 * After interrupts are allocated with nfp_net_irqs_alloc() this function
 * should be called to assign them to a specific netdev (port).
 */
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
                    unsigned int n)
        struct nfp_net_dp *dp = &nn->dp;

        nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
        dp->num_r_vecs = nn->max_r_vecs;

        memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);

        if (dp->num_rx_rings > dp->num_r_vecs ||
            dp->num_tx_rings > dp->num_r_vecs)
                dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
                         dp->num_rx_rings, dp->num_tx_rings,

        dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
        dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
        dp->num_stack_tx_rings = dp->num_tx_rings;
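/* Note: the first NFP_NET_NON_Q_VECTORS MSI-X entries are reserved for the
 * auxiliary interrupts (link state change and exceptions), so only the
 * remaining vectors can service RX/TX rings; the ring counts are clamped to
 * that number above.
 */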
/**
 * nfp_net_irqs_disable() - Disable interrupts
 * @pdev: PCI device structure
 *
 * Undoes what @nfp_net_irqs_alloc() does.
 */
void nfp_net_irqs_disable(struct pci_dev *pdev)
        pci_disable_msix(pdev);

/**
 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
 * @data:  Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
        struct nfp_net_r_vector *r_vec = data;

        napi_schedule_irqoff(&r_vec->napi);

        /* The FW auto-masks any interrupt, either via the MASK bit in
         * the MSI-X table or via the per entry ICR field.  So there
         * is no need to disable interrupts here.
         */

static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
        struct nfp_net_r_vector *r_vec = data;

        tasklet_schedule(&r_vec->tasklet);

/**
 * nfp_net_read_link_status() - Reread link status from control BAR
 * @nn:  NFP Network structure
 */
static void nfp_net_read_link_status(struct nfp_net *nn)
        spin_lock_irqsave(&nn->link_status_lock, flags);

        sts = nn_readl(nn, NFP_NET_CFG_STS);
        link_up = !!(sts & NFP_NET_CFG_STS_LINK);

        if (nn->link_up == link_up)

        nn->link_up = link_up;
                set_bit(NFP_PORT_CHANGED, &nn->port->flags);

                netif_carrier_on(nn->dp.netdev);
                netdev_info(nn->dp.netdev, "NIC Link is Up\n");
                netif_carrier_off(nn->dp.netdev);
                netdev_info(nn->dp.netdev, "NIC Link is Down\n");

        spin_unlock_irqrestore(&nn->link_status_lock, flags);

/**
 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
 * @data:  Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
        struct nfp_net *nn = data;
        struct msix_entry *entry;

        entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];

        nfp_net_read_link_status(nn);

        nfp_net_irq_unmask(nn, entry->entry);

/**
 * nfp_net_irq_exn() - Interrupt service routine for exceptions
 * @data:  Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_exn(int irq, void *data)
        struct nfp_net *nn = data;

        nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
        /* XXX TO BE IMPLEMENTED */
/**
 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
 * @tx_ring:  TX ring structure
 * @r_vec:    IRQ vector servicing this ring
 * @is_xdp:   Is this an XDP TX ring?
 */
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
                     struct nfp_net_r_vector *r_vec, unsigned int idx,
                     bool is_xdp)
        struct nfp_net *nn = r_vec->nfp_net;

        tx_ring->r_vec = r_vec;
        tx_ring->is_xdp = is_xdp;
        u64_stats_init(&tx_ring->r_vec->tx_sync);

        tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
        tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);

/**
 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
 * @rx_ring:  RX ring structure
 * @r_vec:    IRQ vector servicing this ring
 */
nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
                     struct nfp_net_r_vector *r_vec, unsigned int idx)
        struct nfp_net *nn = r_vec->nfp_net;

        rx_ring->r_vec = r_vec;
        u64_stats_init(&rx_ring->r_vec->rx_sync);

        rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
        rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);

/**
 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
 * @nn:          NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @format:      printf-style format to construct the interrupt name
 * @name:        Pointer to allocated space for interrupt name
 * @name_sz:     Size of space for interrupt name
 * @vector_idx:  Index of MSI-X vector used for this interrupt
 * @handler:     IRQ handler to register for this interrupt
 */
nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
                        const char *format, char *name, size_t name_sz,
                        unsigned int vector_idx, irq_handler_t handler)
        struct msix_entry *entry;

        entry = &nn->irq_entries[vector_idx];

        snprintf(name, name_sz, format, nfp_net_name(nn));
        err = request_irq(entry->vector, handler, 0, name, nn);
                nn_err(nn, "Failed to request IRQ %d (err=%d).\n",

        nn_writeb(nn, ctrl_offset, entry->entry);

/**
 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
 * @nn:          NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @vector_idx:  Index of MSI-X vector used for this interrupt
 */
static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
                                 unsigned int vector_idx)
        nn_writeb(nn, ctrl_offset, 0xff);
        free_irq(nn->irq_entries[vector_idx].vector, nn);
/* One queue controller peripheral queue is used for transmit.  The
 * driver en-queues packets for transmit by advancing the write
 * pointer.  The device indicates that packets have transmitted by
 * advancing the read pointer.  The driver maintains a local copy of
 * the read and write pointer in @struct nfp_net_tx_ring.  The driver
 * keeps @wr_p in sync with the queue controller write pointer and can
 * determine how many packets have been transmitted by comparing its
 * copy of the read pointer @rd_p with the read pointer maintained by
 * the queue controller peripheral.
 */

/**
 * nfp_net_tx_full() - Check if the TX ring is full
 * @tx_ring: TX ring to check
 * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
 *
 * This function checks, based on the *host copy* of read/write
 * pointer if a given TX ring is full.  The real TX queue may have
 * some newly made available slots.
 *
 * Return: True if the ring is full.
 */
static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
        return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
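/* Worked example (illustrative numbers): with cnt = 4096, wr_p = 5000 and
 * rd_p = 1000 there are 4000 descriptors in flight, so a request for
 * dcnt = 100 sees 5000 - 1000 >= 4096 - 100 and the ring reports full.
 * The pointers are free-running; only their difference matters here.
 */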
/* Wrappers for deciding when to stop and restart TX queues */
static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
        return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);

static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
        return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);

/**
 * nfp_net_tx_ring_stop() - stop tx ring
 * @nd_q:    netdev queue
 * @tx_ring: driver tx queue structure
 *
 * Safely stop TX ring.  Remember that while we are running .start_xmit()
 * someone else may be cleaning the TX ring completions so we need to be
 * extra careful here.
 */
static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
                                 struct nfp_net_tx_ring *tx_ring)
        netif_tx_stop_queue(nd_q);

        /* We can race with the TX completion out of NAPI so recheck */
        if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
                netif_tx_start_queue(nd_q);

/**
 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd:   Pointer to HW TX descriptor
 * @skb:   Pointer to SKB
 *
 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
 * Return error on packet header greater than maximum supported LSO header size.
 */
static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
                           struct nfp_net_tx_buf *txbuf,
                           struct nfp_net_tx_desc *txd, struct sk_buff *skb)
        if (!skb_is_gso(skb))

        if (!skb->encapsulation) {
                txd->l3_offset = skb_network_offset(skb);
                txd->l4_offset = skb_transport_offset(skb);
                hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
                txd->l3_offset = skb_inner_network_offset(skb);
                txd->l4_offset = skb_inner_transport_offset(skb);
                hdrlen = skb_inner_transport_header(skb) - skb->data +
                         inner_tcp_hdrlen(skb);

        txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
        txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);

        mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
        txd->lso_hdrlen = hdrlen;
        txd->mss = cpu_to_le16(mss);
        txd->flags |= PCIE_DESC_TX_LSO;

        u64_stats_update_begin(&r_vec->tx_sync);
        u64_stats_update_end(&r_vec->tx_sync);

/**
 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
 * @dp:    NFP Net data path struct
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd:   Pointer to TX descriptor
 * @skb:   Pointer to SKB
 *
 * This function sets the TX checksum flags in the TX descriptor based
 * on the configuration and the protocol of the packet to be transmitted.
 */
static void nfp_net_tx_csum(struct nfp_net_dp *dp,
                            struct nfp_net_r_vector *r_vec,
                            struct nfp_net_tx_buf *txbuf,
                            struct nfp_net_tx_desc *txd, struct sk_buff *skb)
        struct ipv6hdr *ipv6h;

        if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))

        if (skb->ip_summed != CHECKSUM_PARTIAL)

        txd->flags |= PCIE_DESC_TX_CSUM;
        if (skb->encapsulation)
                txd->flags |= PCIE_DESC_TX_ENCAP;

        iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
        ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

        if (iph->version == 4) {
                txd->flags |= PCIE_DESC_TX_IP4_CSUM;
                l4_hdr = iph->protocol;
        } else if (ipv6h->version == 6) {
                l4_hdr = ipv6h->nexthdr;
                nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);

                txd->flags |= PCIE_DESC_TX_TCP_CSUM;
                txd->flags |= PCIE_DESC_TX_UDP_CSUM;
                nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);

        u64_stats_update_begin(&r_vec->tx_sync);
        if (skb->encapsulation)
                r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
        else
                r_vec->hw_csum_tx += txbuf->pkt_cnt;
        u64_stats_update_end(&r_vec->tx_sync);

static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
        nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
        tx_ring->wr_ptr_add = 0;
static int nfp_net_prep_port_id(struct sk_buff *skb)
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))

        if (unlikely(skb_cow_head(skb, 8)))

        data = skb_push(skb, 8);
        put_unaligned_be32(NFP_NET_META_PORTID, data);
        put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
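/* The 8 bytes pushed above form a metadata prepend in front of the frame:
 * a 32-bit NFP_NET_META_PORTID type word followed by the 32-bit mux port id
 * taken from the skb's metadata dst.  nfp_net_tx() folds this length into
 * the descriptor via md_bytes.
 */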
/**
 * nfp_net_tx() - Main transmit entry point
 * @skb:    SKB to transmit
 * @netdev: netdev structure
 *
 * Return: NETDEV_TX_OK on success.
 */
static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
        struct nfp_net *nn = netdev_priv(netdev);
        const struct skb_frag_struct *frag;
        struct nfp_net_tx_desc *txd, txdg;
        int f, nr_frags, wr_idx, md_bytes;
        struct nfp_net_tx_ring *tx_ring;
        struct nfp_net_r_vector *r_vec;
        struct nfp_net_tx_buf *txbuf;
        struct netdev_queue *nd_q;
        struct nfp_net_dp *dp;

        qidx = skb_get_queue_mapping(skb);
        tx_ring = &dp->tx_rings[qidx];
        r_vec = tx_ring->r_vec;
        nd_q = netdev_get_tx_queue(dp->netdev, qidx);

        nr_frags = skb_shinfo(skb)->nr_frags;

        if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
                nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
                           qidx, tx_ring->wr_p, tx_ring->rd_p);
                netif_tx_stop_queue(nd_q);
                nfp_net_tx_xmit_more_flush(tx_ring);
                u64_stats_update_begin(&r_vec->tx_sync);
                u64_stats_update_end(&r_vec->tx_sync);
                return NETDEV_TX_BUSY;

        md_bytes = nfp_net_prep_port_id(skb);
        if (unlikely(md_bytes < 0)) {
                nfp_net_tx_xmit_more_flush(tx_ring);
                dev_kfree_skb_any(skb);

        /* Start with the head skbuf */
        dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
        if (dma_mapping_error(dp->dev, dma_addr))

        wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

        /* Stash the soft descriptor of the head then initialize it */
        txbuf = &tx_ring->txbufs[wr_idx];
        txbuf->dma_addr = dma_addr;
        txbuf->real_len = skb->len;

        /* Build TX descriptor */
        txd = &tx_ring->txds[wr_idx];
        txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
        txd->dma_len = cpu_to_le16(skb_headlen(skb));
        nfp_desc_set_dma_addr(txd, dma_addr);
        txd->data_len = cpu_to_le16(skb->len);

        /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
        nfp_net_tx_tso(r_vec, txbuf, txd, skb);
        nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
        if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
                txd->flags |= PCIE_DESC_TX_VLAN;
                txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));

                /* all descs must match except for in addr, length and eop */
                for (f = 0; f < nr_frags; f++) {
                        frag = &skb_shinfo(skb)->frags[f];
                        fsize = skb_frag_size(frag);

                        dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
                                                    fsize, DMA_TO_DEVICE);
                        if (dma_mapping_error(dp->dev, dma_addr))

                        wr_idx = D_IDX(tx_ring, wr_idx + 1);
                        tx_ring->txbufs[wr_idx].skb = skb;
                        tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
                        tx_ring->txbufs[wr_idx].fidx = f;

                        txd = &tx_ring->txds[wr_idx];
                        txd->dma_len = cpu_to_le16(fsize);
                        nfp_desc_set_dma_addr(txd, dma_addr);
                                (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;

        u64_stats_update_begin(&r_vec->tx_sync);
        u64_stats_update_end(&r_vec->tx_sync);

        netdev_tx_sent_queue(nd_q, txbuf->real_len);

        skb_tx_timestamp(skb);

        tx_ring->wr_p += nr_frags + 1;
        if (nfp_net_tx_ring_should_stop(tx_ring))
                nfp_net_tx_ring_stop(nd_q, tx_ring);

        tx_ring->wr_ptr_add += nr_frags + 1;
        if (!skb->xmit_more || netif_xmit_stopped(nd_q))
                nfp_net_tx_xmit_more_flush(tx_ring);

                frag = &skb_shinfo(skb)->frags[f];
                dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
                               skb_frag_size(frag), DMA_TO_DEVICE);
                tx_ring->txbufs[wr_idx].skb = NULL;
                tx_ring->txbufs[wr_idx].dma_addr = 0;
                tx_ring->txbufs[wr_idx].fidx = -2;
                        wr_idx += tx_ring->cnt;
        dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
                         skb_headlen(skb), DMA_TO_DEVICE);
        tx_ring->txbufs[wr_idx].skb = NULL;
        tx_ring->txbufs[wr_idx].dma_addr = 0;
        tx_ring->txbufs[wr_idx].fidx = -2;
        nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
        nfp_net_tx_xmit_more_flush(tx_ring);
        u64_stats_update_begin(&r_vec->tx_sync);
        u64_stats_update_end(&r_vec->tx_sync);
        dev_kfree_skb_any(skb);
/**
 * nfp_net_tx_complete() - Handle completed TX packets
 * @tx_ring:   TX ring structure
 *
 * Return: Number of completed TX descriptors
 */
static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
        struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
        const struct skb_frag_struct *frag;
        struct netdev_queue *nd_q;
        u32 done_pkts = 0, done_bytes = 0;

        if (tx_ring->wr_p == tx_ring->rd_p)

        /* Work out how many descriptors have been transmitted */
        qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

        if (qcp_rd_p == tx_ring->qcp_rd_p)

        todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

                idx = D_IDX(tx_ring, tx_ring->rd_p++);

                skb = tx_ring->txbufs[idx].skb;

                nr_frags = skb_shinfo(skb)->nr_frags;
                fidx = tx_ring->txbufs[idx].fidx;

                        dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
                                         skb_headlen(skb), DMA_TO_DEVICE);

                        done_pkts += tx_ring->txbufs[idx].pkt_cnt;
                        done_bytes += tx_ring->txbufs[idx].real_len;

                        frag = &skb_shinfo(skb)->frags[fidx];
                        dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
                                       skb_frag_size(frag), DMA_TO_DEVICE);

                        /* check for last gather fragment */
                        if (fidx == nr_frags - 1)
                                dev_consume_skb_any(skb);

                tx_ring->txbufs[idx].dma_addr = 0;
                tx_ring->txbufs[idx].skb = NULL;
                tx_ring->txbufs[idx].fidx = -2;

        tx_ring->qcp_rd_p = qcp_rd_p;

        u64_stats_update_begin(&r_vec->tx_sync);
        r_vec->tx_bytes += done_bytes;
        r_vec->tx_pkts += done_pkts;
        u64_stats_update_end(&r_vec->tx_sync);

        nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
        netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
        if (nfp_net_tx_ring_should_wake(tx_ring)) {
                /* Make sure TX thread will see updated tx_ring->rd_p */

                if (unlikely(netif_tx_queue_stopped(nd_q)))
                        netif_tx_wake_queue(nd_q);

        WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
                  "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
                  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
        u32 done_pkts = 0, done_bytes = 0;

        /* Work out how many descriptors have been transmitted */
        qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

        if (qcp_rd_p == tx_ring->qcp_rd_p)

        todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

        done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
        todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);

        tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);

                idx = D_IDX(tx_ring, tx_ring->rd_p);

                done_bytes += tx_ring->txbufs[idx].real_len;

        u64_stats_update_begin(&r_vec->tx_sync);
        r_vec->tx_bytes += done_bytes;
        r_vec->tx_pkts += done_pkts;
        u64_stats_update_end(&r_vec->tx_sync);

        WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
                  "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
                  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);

/**
 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
 * @dp:      NFP Net data path struct
 * @tx_ring: TX ring structure
 *
 * Assumes that the device is stopped
 */
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
        const struct skb_frag_struct *frag;
        struct netdev_queue *nd_q;

        while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
                struct nfp_net_tx_buf *tx_buf;
                struct sk_buff *skb;

                idx = D_IDX(tx_ring, tx_ring->rd_p);
                tx_buf = &tx_ring->txbufs[idx];

                skb = tx_ring->txbufs[idx].skb;
                nr_frags = skb_shinfo(skb)->nr_frags;

                if (tx_buf->fidx == -1) {
                        dma_unmap_single(dp->dev, tx_buf->dma_addr,
                                         skb_headlen(skb), DMA_TO_DEVICE);
                        /* unmap fragment */
                        frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
                        dma_unmap_page(dp->dev, tx_buf->dma_addr,
                                       skb_frag_size(frag), DMA_TO_DEVICE);

                /* check for last gather fragment */
                if (tx_buf->fidx == nr_frags - 1)
                        dev_kfree_skb_any(skb);

                tx_buf->dma_addr = 0;

                tx_ring->qcp_rd_p++;

        memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
        tx_ring->qcp_rd_p = 0;
        tx_ring->wr_ptr_add = 0;

        if (tx_ring->is_xdp || !dp->netdev)

        nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
        netdev_tx_reset_queue(nd_q);

static void nfp_net_tx_timeout(struct net_device *netdev)
        struct nfp_net *nn = netdev_priv(netdev);

        for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
                if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
                nn_warn(nn, "TX timeout on ring: %d\n", i);
        nn_warn(nn, "TX watchdog timeout\n");
/* Receive processing
 */
nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
        unsigned int fl_bufsz;

        fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
        fl_bufsz += dp->rx_dma_off;
        if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
                fl_bufsz += NFP_NET_MAX_PREPEND;
        else
                fl_bufsz += dp->rx_offset;
        fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;

        fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
        fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
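/* Worked example (illustrative numbers only): with no extra XDP headroom
 * (rx_dma_off = 0), a fixed rx_offset of 32 and an MTU of 1500 the data
 * portion is NFP_NET_RX_BUF_HEADROOM + 32 + 14 (ETH_HLEN) + 8 (2 * VLAN_HLEN)
 * + 1500; the final fl_bufsz additionally reserves SKB_DATA_ALIGN'd space for
 * struct skb_shared_info so the frag can later be turned into an skb.
 */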
nfp_net_free_frag(void *frag, bool xdp)
                skb_free_frag(frag);
                __free_page(virt_to_page(frag));

/**
 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
 * @dp:       NFP Net data path struct
 * @dma_addr: Pointer to storage for DMA address (output param)
 *
 * This function will allocate a new page frag and map it for DMA.
 *
 * Return: allocated page frag or NULL on failure.
 */
static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
        if (!dp->xdp_prog) {
                frag = netdev_alloc_frag(dp->fl_bufsz);
                page = alloc_page(GFP_KERNEL | __GFP_COLD);
                frag = page ? page_address(page) : NULL;
                nn_dp_warn(dp, "Failed to alloc receive page frag\n");

        *dma_addr = nfp_net_dma_map_rx(dp, frag);
        if (dma_mapping_error(dp->dev, *dma_addr)) {
                nfp_net_free_frag(frag, dp->xdp_prog);
                nn_dp_warn(dp, "Failed to map DMA RX buffer\n");

static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
        if (!dp->xdp_prog) {
                frag = napi_alloc_frag(dp->fl_bufsz);
                page = alloc_page(GFP_ATOMIC | __GFP_COLD);
                frag = page ? page_address(page) : NULL;
                nn_dp_warn(dp, "Failed to alloc receive page frag\n");

        *dma_addr = nfp_net_dma_map_rx(dp, frag);
        if (dma_mapping_error(dp->dev, *dma_addr)) {
                nfp_net_free_frag(frag, dp->xdp_prog);
                nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
/**
 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
 * @dp:       NFP Net data path struct
 * @rx_ring:  RX ring structure
 * @frag:     page fragment buffer
 * @dma_addr: DMA address of skb mapping
 */
static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
                                struct nfp_net_rx_ring *rx_ring,
                                void *frag, dma_addr_t dma_addr)
        unsigned int wr_idx;

        wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

        nfp_net_dma_sync_dev_rx(dp, dma_addr);

        /* Stash SKB and DMA address away */
        rx_ring->rxbufs[wr_idx].frag = frag;
        rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;

        /* Fill freelist descriptor */
        rx_ring->rxds[wr_idx].fld.reserved = 0;
        rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
        nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
                              dma_addr + dp->rx_dma_off);

        if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
                /* Update write pointer of the freelist queue. Make
                 * sure all writes are flushed before telling the hardware.
                 */
                nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
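/* Freelist writes are batched: the QCP write pointer is only bumped once
 * every NFP_NET_FL_BATCH buffers, so the device sees new RX buffers in
 * groups rather than one at a time.
 */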
/**
 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
 * @rx_ring: RX ring structure
 *
 * Warning: Do *not* call if ring buffers were never put on the FW freelist
 *          (i.e. device was not enabled)!
 */
static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
        unsigned int wr_idx, last_idx;

        /* Move the empty entry to the end of the list */
        wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
        last_idx = rx_ring->cnt - 1;
        rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
        rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
        rx_ring->rxbufs[last_idx].dma_addr = 0;
        rx_ring->rxbufs[last_idx].frag = NULL;

        memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);

/**
 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
 * @dp:      NFP Net data path struct
 * @rx_ring: RX ring to remove buffers from
 *
 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
 * entries.  After device is disabled nfp_net_rx_ring_reset() must be called
 * to restore required ring geometry.
 */
nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
                          struct nfp_net_rx_ring *rx_ring)
        for (i = 0; i < rx_ring->cnt - 1; i++) {
                /* NULL skb can only happen when initial filling of the ring
                 * fails to allocate enough buffers and calls here to free
                 * already allocated ones.
                 */
                if (!rx_ring->rxbufs[i].frag)

                nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
                nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
                rx_ring->rxbufs[i].dma_addr = 0;
                rx_ring->rxbufs[i].frag = NULL;

/**
 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
 * @dp:      NFP Net data path struct
 * @rx_ring: RX ring to remove buffers from
 */
nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
                           struct nfp_net_rx_ring *rx_ring)
        struct nfp_net_rx_buf *rxbufs;

        rxbufs = rx_ring->rxbufs;

        for (i = 0; i < rx_ring->cnt - 1; i++) {
                rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
                if (!rxbufs[i].frag) {
                        nfp_net_rx_ring_bufs_free(dp, rx_ring);

/**
 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
 * @dp:      NFP Net data path struct
 * @rx_ring: RX ring to fill
 */
nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
                              struct nfp_net_rx_ring *rx_ring)
        for (i = 0; i < rx_ring->cnt - 1; i++)
                nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
                                    rx_ring->rxbufs[i].dma_addr);
/**
 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
 * @flags: RX descriptor flags field in CPU byte order
 */
static int nfp_net_rx_csum_has_errors(u16 flags)
        u16 csum_all_checked, csum_all_ok;

        csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
        csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;

        return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
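/* In other words: every checksum the descriptor claims to have checked must
 * also have its corresponding OK bit set; shifting the OK bits left by
 * PCIE_DESC_RX_CSUM_OK_SHIFT lines them up with the "checked" bits, so any
 * checked-but-not-OK field makes the comparison non-zero.
 */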
/**
 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
 * @dp:    NFP Net data path struct
 * @r_vec: per-ring structure
 * @rxd:   Pointer to RX descriptor
 * @meta:  Parsed metadata prepend
 * @skb:   Pointer to SKB
 */
static void nfp_net_rx_csum(struct nfp_net_dp *dp,
                            struct nfp_net_r_vector *r_vec,
                            struct nfp_net_rx_desc *rxd,
                            struct nfp_meta_parsed *meta, struct sk_buff *skb)
        skb_checksum_none_assert(skb);

        if (!(dp->netdev->features & NETIF_F_RXCSUM))

        if (meta->csum_type) {
                skb->ip_summed = meta->csum_type;
                skb->csum = meta->csum;
                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->hw_csum_rx_ok++;
                u64_stats_update_end(&r_vec->rx_sync);

        if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->hw_csum_rx_error++;
                u64_stats_update_end(&r_vec->rx_sync);

        /* Assume that the firmware will never report inner CSUM_OK unless outer
         * L4 headers were successfully parsed. FW will always report zero UDP
         * checksum as CSUM_OK.
         */
        if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
            rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
                __skb_incr_checksum_unnecessary(skb);
                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->hw_csum_rx_ok++;
                u64_stats_update_end(&r_vec->rx_sync);

        if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
            rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
                __skb_incr_checksum_unnecessary(skb);
                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->hw_csum_rx_inner_ok++;
                u64_stats_update_end(&r_vec->rx_sync);

nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
                 unsigned int type, __be32 *hash)
        if (!(netdev->features & NETIF_F_RXHASH))

        case NFP_NET_RSS_IPV4:
        case NFP_NET_RSS_IPV6:
        case NFP_NET_RSS_IPV6_EX:
                meta->hash_type = PKT_HASH_TYPE_L3;
                meta->hash_type = PKT_HASH_TYPE_L4;

        meta->hash = get_unaligned_be32(hash);

nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
                      void *data, struct nfp_net_rx_desc *rxd)
        struct nfp_net_rx_hash *rx_hash = data;

        if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))

        nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
                   void *data, int meta_len)
        meta_info = get_unaligned_be32(data);

                switch (meta_info & NFP_NET_META_FIELD_MASK) {
                case NFP_NET_META_HASH:
                        meta_info >>= NFP_NET_META_FIELD_SIZE;
                        nfp_net_set_hash(netdev, meta,
                                         meta_info & NFP_NET_META_FIELD_MASK,
                case NFP_NET_META_MARK:
                        meta->mark = get_unaligned_be32(data);
                case NFP_NET_META_PORTID:
                        meta->portid = get_unaligned_be32(data);
                case NFP_NET_META_CSUM:
                        meta->csum_type = CHECKSUM_COMPLETE;
                                (__force __wsum)__get_unaligned_cpu32(data);

                meta_info >>= NFP_NET_META_FIELD_SIZE;
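/* Chained metadata format: the first 32-bit word read above is a header that
 * packs several field type codes (NFP_NET_META_FIELD_SIZE bits each); each
 * loop iteration consumes one code from the header (shifting it right) and
 * the corresponding value word from the prepend, until the header is empty.
 */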
nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
                struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
                struct sk_buff *skb)
        u64_stats_update_begin(&r_vec->rx_sync);
        u64_stats_update_end(&r_vec->rx_sync);

        /* skb is build based on the frag, free_skb() would free the frag
         * so to be able to reuse it we need an extra ref.
         */
        if (skb && rxbuf && skb->head == rxbuf->frag)
                page_ref_inc(virt_to_head_page(rxbuf->frag));
                nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
                dev_kfree_skb_any(skb);

nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
                   struct nfp_net_tx_ring *tx_ring,
                   struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
                   unsigned int pkt_len, bool *completed)
        struct nfp_net_tx_buf *txbuf;
        struct nfp_net_tx_desc *txd;

        if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
                        nfp_net_xdp_complete(tx_ring);

                if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
                        nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,

        wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

        /* Stash the soft descriptor of the head then initialize it */
        txbuf = &tx_ring->txbufs[wr_idx];

        nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);

        txbuf->frag = rxbuf->frag;
        txbuf->dma_addr = rxbuf->dma_addr;
        txbuf->real_len = pkt_len;

        dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
                                   pkt_len, DMA_BIDIRECTIONAL);

        /* Build TX descriptor */
        txd = &tx_ring->txds[wr_idx];
        txd->offset_eop = PCIE_DESC_TX_EOP;
        txd->dma_len = cpu_to_le16(pkt_len);
        nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
        txd->data_len = cpu_to_le16(pkt_len);

        txd->lso_hdrlen = 0;

        tx_ring->wr_ptr_add++;
static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start,
                           unsigned int *off, unsigned int *len)
        struct xdp_buff xdp;

        xdp.data_hard_start = hard_start;
        xdp.data = data + *off;
        xdp.data_end = data + *off + *len;

        orig_data = xdp.data;
        ret = bpf_prog_run_xdp(prog, &xdp);

        *len -= xdp.data - orig_data;
        *off += xdp.data - orig_data;
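/* If the BPF program moved xdp.data (e.g. via bpf_xdp_adjust_head()), the
 * adjustment above is folded back into the caller's packet offset and length,
 * so the later skb_reserve()/skb_put() in nfp_net_rx() operate on the
 * adjusted frame.
 */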
/**
 * nfp_net_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring: RX ring to receive from
 * @budget:  NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * Return: Number of packets received.
 */
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
        struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
        struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
        struct nfp_net_tx_ring *tx_ring;
        struct bpf_prog *xdp_prog;
        bool xdp_tx_cmpl = false;
        unsigned int true_bufsz;
        struct sk_buff *skb;
        int pkts_polled = 0;

        xdp_prog = READ_ONCE(dp->xdp_prog);
        true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
        tx_ring = r_vec->xdp_ring;

        while (pkts_polled < budget) {
                unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
                struct nfp_net_rx_buf *rxbuf;
                struct nfp_net_rx_desc *rxd;
                struct nfp_meta_parsed meta;
                struct net_device *netdev;
                dma_addr_t new_dma_addr;

                idx = D_IDX(rx_ring, rx_ring->rd_p);

                rxd = &rx_ring->rxds[idx];
                if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))

                /* Memory barrier to ensure that we won't do other reads
                 * before the DD bit.
                 */

                memset(&meta, 0, sizeof(meta));

                rxbuf = &rx_ring->rxbufs[idx];
                /*         <-- [rx_offset] -->
                 * ---------------------------------------------------------
                 * | [XX] |  metadata  |             packet           | XXXX |
                 * ---------------------------------------------------------
                 *         <---------------- data_len --------------->
                 *
                 * The rx_offset is fixed for all packets, the meta_len can vary
                 * on a packet by packet basis. If rx_offset is set to zero
                 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
                 * buffer and is immediately followed by the packet (no [XX]).
                 */
                meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
                data_len = le16_to_cpu(rxd->rxd.data_len);
                pkt_len = data_len - meta_len;

                pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
                if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
                        pkt_off += meta_len;
                else
                        pkt_off += dp->rx_offset;
                meta_off = pkt_off - meta_len;

                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->rx_bytes += pkt_len;
                u64_stats_update_end(&r_vec->rx_sync);

                if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
                             (dp->rx_offset && meta_len > dp->rx_offset))) {
                        nn_dp_warn(dp, "oversized RX packet metadata %u\n",
                        nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);

                nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,

                if (!dp->chained_metadata_format) {
                        nfp_net_set_hash_desc(dp->netdev, &meta,
                                              rxbuf->frag + meta_off, rxd);
                } else if (meta_len) {
                        end = nfp_net_parse_meta(dp->netdev, &meta,
                                                 rxbuf->frag + meta_off,
                        if (unlikely(end != rxbuf->frag + pkt_off)) {
                                nn_dp_warn(dp, "invalid RX packet metadata\n");
                                nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,

                if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
                                  dp->bpf_offload_xdp) && !meta.portid) {
                        unsigned int dma_off;

                        hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;

                        act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
                                              &pkt_off, &pkt_len);

                                dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
                                if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
                                        trace_xdp_exception(dp->netdev,
                                bpf_warn_invalid_xdp_action(act);
                                trace_xdp_exception(dp->netdev, xdp_prog, act);
                                nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,

                skb = build_skb(rxbuf->frag, true_bufsz);
                if (unlikely(!skb)) {
                        nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
                new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
                if (unlikely(!new_frag)) {
                        nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);

                nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

                nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

                if (likely(!meta.portid)) {
                        netdev = dp->netdev;
                        nn = netdev_priv(dp->netdev);
                        netdev = nfp_app_repr_get(nn->app, meta.portid);
                        if (unlikely(!netdev)) {
                                nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
                        nfp_repr_inc_rx_stats(netdev, pkt_len);

                skb_reserve(skb, pkt_off);
                skb_put(skb, pkt_len);

                skb->mark = meta.mark;
                skb_set_hash(skb, meta.hash, meta.hash_type);

                skb_record_rx_queue(skb, rx_ring->idx);
                skb->protocol = eth_type_trans(skb, netdev);

                nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);

                if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               le16_to_cpu(rxd->rxd.vlan));

                napi_gro_receive(&rx_ring->r_vec->napi, skb);

                if (tx_ring->wr_ptr_add)
                        nfp_net_tx_xmit_more_flush(tx_ring);
                else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
                        if (!nfp_net_xdp_complete(tx_ring))
                                pkts_polled = budget;
/**
 * nfp_net_poll() - napi poll function
 * @napi:    NAPI structure
 * @budget:  NAPI budget
 *
 * Return: number of packets polled.
 */
static int nfp_net_poll(struct napi_struct *napi, int budget)
        struct nfp_net_r_vector *r_vec =
                container_of(napi, struct nfp_net_r_vector, napi);
        unsigned int pkts_polled = 0;

                nfp_net_tx_complete(r_vec->tx_ring);
                pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);

        if (pkts_polled < budget)
                if (napi_complete_done(napi, pkts_polled))
                        nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
/* Control device data path
 */

nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
                struct sk_buff *skb, bool old)
        unsigned int real_len = skb->len, meta_len = 0;
        struct nfp_net_tx_ring *tx_ring;
        struct nfp_net_tx_buf *txbuf;
        struct nfp_net_tx_desc *txd;
        struct nfp_net_dp *dp;
        dma_addr_t dma_addr;

        dp = &r_vec->nfp_net->dp;
        tx_ring = r_vec->tx_ring;

        if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
                nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");

        if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
                u64_stats_update_begin(&r_vec->tx_sync);
                u64_stats_update_end(&r_vec->tx_sync);
                        __skb_queue_tail(&r_vec->queue, skb);
                        __skb_queue_head(&r_vec->queue, skb);

        if (nfp_app_ctrl_has_meta(nn->app)) {
                if (unlikely(skb_headroom(skb) < 8)) {
                        nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
                put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
                put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));

        /* Start with the head skbuf */
        dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
        if (dma_mapping_error(dp->dev, dma_addr))

        wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

        /* Stash the soft descriptor of the head then initialize it */
        txbuf = &tx_ring->txbufs[wr_idx];
        txbuf->dma_addr = dma_addr;
        txbuf->real_len = real_len;

        /* Build TX descriptor */
        txd = &tx_ring->txds[wr_idx];
        txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
        txd->dma_len = cpu_to_le16(skb_headlen(skb));
        nfp_desc_set_dma_addr(txd, dma_addr);
        txd->data_len = cpu_to_le16(skb->len);

        txd->lso_hdrlen = 0;

        tx_ring->wr_ptr_add++;
        nfp_net_tx_xmit_more_flush(tx_ring);

        nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
        u64_stats_update_begin(&r_vec->tx_sync);
        u64_stats_update_end(&r_vec->tx_sync);
        dev_kfree_skb_any(skb);

bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
        struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];

        spin_lock_bh(&r_vec->lock);
        ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
        spin_unlock_bh(&r_vec->lock);

static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(&r_vec->queue)))
                if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
        u32 meta_type, meta_tag;

        if (!nfp_app_ctrl_has_meta(nn->app))

        meta_type = get_unaligned_be32(data);
        meta_tag = get_unaligned_be32(data + 4);

        return (meta_type == NFP_NET_META_PORTID &&
                meta_tag == NFP_META_PORT_ID_CTRL);

nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
                struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
        unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
        struct nfp_net_rx_buf *rxbuf;
        struct nfp_net_rx_desc *rxd;
        dma_addr_t new_dma_addr;
        struct sk_buff *skb;

        idx = D_IDX(rx_ring, rx_ring->rd_p);

        rxd = &rx_ring->rxds[idx];
        if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))

        /* Memory barrier to ensure that we won't do other reads
         * before the DD bit.
         */

        rxbuf = &rx_ring->rxbufs[idx];
        meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
        data_len = le16_to_cpu(rxd->rxd.data_len);
        pkt_len = data_len - meta_len;

        pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
        if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
                pkt_off += meta_len;
        else
                pkt_off += dp->rx_offset;
        meta_off = pkt_off - meta_len;

        u64_stats_update_begin(&r_vec->rx_sync);
        r_vec->rx_bytes += pkt_len;
        u64_stats_update_end(&r_vec->rx_sync);

        nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);

        if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
                nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
                nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);

        skb = build_skb(rxbuf->frag, dp->fl_bufsz);
        if (unlikely(!skb)) {
                nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
        new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
        if (unlikely(!new_frag)) {
                nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);

        nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

        nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

        skb_reserve(skb, pkt_off);
        skb_put(skb, pkt_len);

        nfp_app_ctrl_rx(nn->app, skb);

static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
        struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
        struct nfp_net *nn = r_vec->nfp_net;
        struct nfp_net_dp *dp = &nn->dp;

        while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))

static void nfp_ctrl_poll(unsigned long arg)
        struct nfp_net_r_vector *r_vec = (void *)arg;

        spin_lock_bh(&r_vec->lock);
        nfp_net_tx_complete(r_vec->tx_ring);
        __nfp_ctrl_tx_queued(r_vec);
        spin_unlock_bh(&r_vec->lock);

        nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
/* Setup and Configuration
 */

/**
 * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
 * @nn:  NFP Network structure
 */
static void nfp_net_vecs_init(struct nfp_net *nn)
        struct nfp_net_r_vector *r_vec;

        nn->lsc_handler = nfp_net_irq_lsc;
        nn->exn_handler = nfp_net_irq_exn;

        for (r = 0; r < nn->max_r_vecs; r++) {
                struct msix_entry *entry;

                entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];

                r_vec = &nn->r_vecs[r];
                r_vec->nfp_net = nn;
                r_vec->irq_entry = entry->entry;
                r_vec->irq_vector = entry->vector;

                if (nn->dp.netdev) {
                        r_vec->handler = nfp_net_irq_rxtx;
                        r_vec->handler = nfp_ctrl_irq_rxtx;

                        __skb_queue_head_init(&r_vec->queue);
                        spin_lock_init(&r_vec->lock);
                        tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
                                     (unsigned long)r_vec);
                        tasklet_disable(&r_vec->tasklet);

                cpumask_set_cpu(r, &r_vec->affinity_mask);
/**
 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
 * @tx_ring:   TX ring to free
 */
static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
        struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

        kfree(tx_ring->txbufs);

                dma_free_coherent(dp->dev, tx_ring->size,
                                  tx_ring->txds, tx_ring->dma);

        tx_ring->txbufs = NULL;
        tx_ring->txds = NULL;

/**
 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
 * @dp:        NFP Net data path struct
 * @tx_ring:   TX Ring structure to allocate
 *
 * Return: 0 on success, negative errno otherwise.
 */
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;

        tx_ring->cnt = dp->txd_cnt;

        tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
        tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
                                            &tx_ring->dma, GFP_KERNEL);

        sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
        tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
        if (!tx_ring->txbufs)

        if (!tx_ring->is_xdp && dp->netdev)
                netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,

        nfp_net_tx_ring_free(tx_ring);

nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
                          struct nfp_net_tx_ring *tx_ring)
        if (!tx_ring->is_xdp)

        for (i = 0; i < tx_ring->cnt; i++) {
                if (!tx_ring->txbufs[i].frag)

                nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
                __free_page(virt_to_page(tx_ring->txbufs[i].frag));

nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
                           struct nfp_net_tx_ring *tx_ring)
        struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;

        if (!tx_ring->is_xdp)

        for (i = 0; i < tx_ring->cnt; i++) {
                txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
                if (!txbufs[i].frag) {
                        nfp_net_tx_ring_bufs_free(dp, tx_ring);

static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
        dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),

        for (r = 0; r < dp->num_tx_rings; r++) {
                if (r >= dp->num_stack_tx_rings)
                        bias = dp->num_stack_tx_rings;

                nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],

                if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))

                if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))

                nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
                nfp_net_tx_ring_free(&dp->tx_rings[r]);
        kfree(dp->tx_rings);

static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
        for (r = 0; r < dp->num_tx_rings; r++) {
                nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
                nfp_net_tx_ring_free(&dp->tx_rings[r]);

        kfree(dp->tx_rings);
/**
 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
 * @rx_ring:  RX ring to free
 */
static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
        struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
        struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

        kfree(rx_ring->rxbufs);

                dma_free_coherent(dp->dev, rx_ring->size,
                                  rx_ring->rxds, rx_ring->dma);

        rx_ring->rxbufs = NULL;
        rx_ring->rxds = NULL;

/**
 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
 * @dp:       NFP Net data path struct
 * @rx_ring:  RX ring to allocate
 *
 * Return: 0 on success, negative errno otherwise.
 */
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
        rx_ring->cnt = dp->rxd_cnt;
        rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
        rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
                                            &rx_ring->dma, GFP_KERNEL);

        sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
        rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
        if (!rx_ring->rxbufs)

        nfp_net_rx_ring_free(rx_ring);

static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
        dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),

        for (r = 0; r < dp->num_rx_rings; r++) {
                nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);

                if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))

                if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))

                nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
                nfp_net_rx_ring_free(&dp->rx_rings[r]);
        kfree(dp->rx_rings);

static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
        for (r = 0; r < dp->num_rx_rings; r++) {
                nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
                nfp_net_rx_ring_free(&dp->rx_rings[r]);

        kfree(dp->rx_rings);
2347 nfp_net_vector_assign_rings(struct nfp_net_dp
*dp
,
2348 struct nfp_net_r_vector
*r_vec
, int idx
)
2350 r_vec
->rx_ring
= idx
< dp
->num_rx_rings
? &dp
->rx_rings
[idx
] : NULL
;
2352 idx
< dp
->num_stack_tx_rings
? &dp
->tx_rings
[idx
] : NULL
;
2354 r_vec
->xdp_ring
= idx
< dp
->num_tx_rings
- dp
->num_stack_tx_rings
?
2355 &dp
->tx_rings
[dp
->num_stack_tx_rings
+ idx
] : NULL
;
2359 nfp_net_prepare_vector(struct nfp_net
*nn
, struct nfp_net_r_vector
*r_vec
,
2366 netif_napi_add(nn
->dp
.netdev
, &r_vec
->napi
,
2367 nfp_net_poll
, NAPI_POLL_WEIGHT
);
2369 tasklet_enable(&r_vec
->tasklet
);
2371 snprintf(r_vec
->name
, sizeof(r_vec
->name
),
2372 "%s-rxtx-%d", nfp_net_name(nn
), idx
);
2373 err
= request_irq(r_vec
->irq_vector
, r_vec
->handler
, 0, r_vec
->name
,
2377 netif_napi_del(&r_vec
->napi
);
2379 tasklet_disable(&r_vec
->tasklet
);
2381 nn_err(nn
, "Error requesting IRQ %d\n", r_vec
->irq_vector
);
2384 disable_irq(r_vec
->irq_vector
);
2386 irq_set_affinity_hint(r_vec
->irq_vector
, &r_vec
->affinity_mask
);
2388 nn_dbg(nn
, "RV%02d: irq=%03d/%03d\n", idx
, r_vec
->irq_vector
,
2395 nfp_net_cleanup_vector(struct nfp_net
*nn
, struct nfp_net_r_vector
*r_vec
)
2397 irq_set_affinity_hint(r_vec
->irq_vector
, NULL
);
2399 netif_napi_del(&r_vec
->napi
);
2401 tasklet_disable(&r_vec
->tasklet
);
2403 free_irq(r_vec
->irq_vector
, r_vec
);
2407 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
2408 * @nn: NFP Net device to reconfigure
2410 void nfp_net_rss_write_itbl(struct nfp_net
*nn
)
2414 for (i
= 0; i
< NFP_NET_CFG_RSS_ITBL_SZ
; i
+= 4)
2415 nn_writel(nn
, NFP_NET_CFG_RSS_ITBL
+ i
,
2416 get_unaligned_le32(nn
->rss_itbl
+ i
));
2420 * nfp_net_rss_write_key() - Write RSS hash key to device
2421 * @nn: NFP Net device to reconfigure
2423 void nfp_net_rss_write_key(struct nfp_net
*nn
)
2427 for (i
= 0; i
< nfp_net_rss_key_sz(nn
); i
+= 4)
2428 nn_writel(nn
, NFP_NET_CFG_RSS_KEY
+ i
,
2429 get_unaligned_le32(nn
->rss_key
+ i
));
2433 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
2434 * @nn: NFP Net device to reconfigure
2436 void nfp_net_coalesce_write_cfg(struct nfp_net
*nn
)
2442 /* Compute factor used to convert coalesce '_usecs' parameters to
2443 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
2446 factor
= nn
->me_freq_mhz
/ 16;
2448 /* copy RX interrupt coalesce parameters */
2449 value
= (nn
->rx_coalesce_max_frames
<< 16) |
2450 (factor
* nn
->rx_coalesce_usecs
);
2451 for (i
= 0; i
< nn
->dp
.num_rx_rings
; i
++)
2452 nn_writel(nn
, NFP_NET_CFG_RXR_IRQ_MOD(i
), value
);
2454 /* copy TX interrupt coalesce parameters */
2455 value
= (nn
->tx_coalesce_max_frames
<< 16) |
2456 (factor
* nn
->tx_coalesce_usecs
);
2457 for (i
= 0; i
< nn
->dp
.num_tx_rings
; i
++)
2458 nn_writel(nn
, NFP_NET_CFG_TXR_IRQ_MOD(i
), value
);
2462 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
2463 * @nn: NFP Net device to reconfigure
2464 * @addr: MAC address to write
2466 * Writes the MAC address from the netdev to the device control BAR. Does not
2467 * perform the required reconfig. We do a bit of byte swapping dance because
2470 static void nfp_net_write_mac_addr(struct nfp_net
*nn
, const u8
*addr
)
2472 nn_writel(nn
, NFP_NET_CFG_MACADDR
+ 0, get_unaligned_be32(addr
));
2473 nn_writew(nn
, NFP_NET_CFG_MACADDR
+ 6, get_unaligned_be16(addr
+ 4));
2476 static void nfp_net_vec_clear_ring_data(struct nfp_net
*nn
, unsigned int idx
)
2478 nn_writeq(nn
, NFP_NET_CFG_RXR_ADDR(idx
), 0);
2479 nn_writeb(nn
, NFP_NET_CFG_RXR_SZ(idx
), 0);
2480 nn_writeb(nn
, NFP_NET_CFG_RXR_VEC(idx
), 0);
2482 nn_writeq(nn
, NFP_NET_CFG_TXR_ADDR(idx
), 0);
2483 nn_writeb(nn
, NFP_NET_CFG_TXR_SZ(idx
), 0);
2484 nn_writeb(nn
, NFP_NET_CFG_TXR_VEC(idx
), 0);
2488 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
2489 * @nn: NFP Net device to reconfigure
2491 static void nfp_net_clear_config_and_disable(struct nfp_net
*nn
)
2493 u32 new_ctrl
, update
;
2497 new_ctrl
= nn
->dp
.ctrl
;
2498 new_ctrl
&= ~NFP_NET_CFG_CTRL_ENABLE
;
2499 update
= NFP_NET_CFG_UPDATE_GEN
;
2500 update
|= NFP_NET_CFG_UPDATE_MSIX
;
2501 update
|= NFP_NET_CFG_UPDATE_RING
;
2503 if (nn
->cap
& NFP_NET_CFG_CTRL_RINGCFG
)
2504 new_ctrl
&= ~NFP_NET_CFG_CTRL_RINGCFG
;
2506 nn_writeq(nn
, NFP_NET_CFG_TXRS_ENABLE
, 0);
2507 nn_writeq(nn
, NFP_NET_CFG_RXRS_ENABLE
, 0);
2509 nn_writel(nn
, NFP_NET_CFG_CTRL
, new_ctrl
);
2510 err
= nfp_net_reconfig(nn
, update
);
2512 nn_err(nn
, "Could not disable device: %d\n", err
);
2514 for (r
= 0; r
< nn
->dp
.num_rx_rings
; r
++)
2515 nfp_net_rx_ring_reset(&nn
->dp
.rx_rings
[r
]);
2516 for (r
= 0; r
< nn
->dp
.num_tx_rings
; r
++)
2517 nfp_net_tx_ring_reset(&nn
->dp
, &nn
->dp
.tx_rings
[r
]);
2518 for (r
= 0; r
< nn
->dp
.num_r_vecs
; r
++)
2519 nfp_net_vec_clear_ring_data(nn
, r
);
2521 nn
->dp
.ctrl
= new_ctrl
;
2525 nfp_net_rx_ring_hw_cfg_write(struct nfp_net
*nn
,
2526 struct nfp_net_rx_ring
*rx_ring
, unsigned int idx
)
2528 /* Write the DMA address, size and MSI-X info to the device */
2529 nn_writeq(nn
, NFP_NET_CFG_RXR_ADDR(idx
), rx_ring
->dma
);
2530 nn_writeb(nn
, NFP_NET_CFG_RXR_SZ(idx
), ilog2(rx_ring
->cnt
));
2531 nn_writeb(nn
, NFP_NET_CFG_RXR_VEC(idx
), rx_ring
->r_vec
->irq_entry
);
2535 nfp_net_tx_ring_hw_cfg_write(struct nfp_net
*nn
,
2536 struct nfp_net_tx_ring
*tx_ring
, unsigned int idx
)
2538 nn_writeq(nn
, NFP_NET_CFG_TXR_ADDR(idx
), tx_ring
->dma
);
2539 nn_writeb(nn
, NFP_NET_CFG_TXR_SZ(idx
), ilog2(tx_ring
->cnt
));
2540 nn_writeb(nn
, NFP_NET_CFG_TXR_VEC(idx
), tx_ring
->r_vec
->irq_entry
);
2544 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
2545 * @nn: NFP Net device to reconfigure
2547 static int nfp_net_set_config_and_enable(struct nfp_net
*nn
)
2549 u32 bufsz
, new_ctrl
, update
= 0;
2553 new_ctrl
= nn
->dp
.ctrl
;
2555 if (nn
->dp
.ctrl
& NFP_NET_CFG_CTRL_RSS_ANY
) {
2556 nfp_net_rss_write_key(nn
);
2557 nfp_net_rss_write_itbl(nn
);
2558 nn_writel(nn
, NFP_NET_CFG_RSS_CTRL
, nn
->rss_cfg
);
2559 update
|= NFP_NET_CFG_UPDATE_RSS
;
2562 if (nn
->dp
.ctrl
& NFP_NET_CFG_CTRL_IRQMOD
) {
2563 nfp_net_coalesce_write_cfg(nn
);
2564 update
|= NFP_NET_CFG_UPDATE_IRQMOD
;
2567 for (r
= 0; r
< nn
->dp
.num_tx_rings
; r
++)
2568 nfp_net_tx_ring_hw_cfg_write(nn
, &nn
->dp
.tx_rings
[r
], r
);
2569 for (r
= 0; r
< nn
->dp
.num_rx_rings
; r
++)
2570 nfp_net_rx_ring_hw_cfg_write(nn
, &nn
->dp
.rx_rings
[r
], r
);
2572 nn_writeq(nn
, NFP_NET_CFG_TXRS_ENABLE
, nn
->dp
.num_tx_rings
== 64 ?
2573 0xffffffffffffffffULL
: ((u64
)1 << nn
->dp
.num_tx_rings
) - 1);
2575 nn_writeq(nn
, NFP_NET_CFG_RXRS_ENABLE
, nn
->dp
.num_rx_rings
== 64 ?
2576 0xffffffffffffffffULL
: ((u64
)1 << nn
->dp
.num_rx_rings
) - 1);
2579 nfp_net_write_mac_addr(nn
, nn
->dp
.netdev
->dev_addr
);
2581 nn_writel(nn
, NFP_NET_CFG_MTU
, nn
->dp
.mtu
);
2583 bufsz
= nn
->dp
.fl_bufsz
- nn
->dp
.rx_dma_off
- NFP_NET_RX_BUF_NON_DATA
;
2584 nn_writel(nn
, NFP_NET_CFG_FLBUFSZ
, bufsz
);
2587 new_ctrl
|= NFP_NET_CFG_CTRL_ENABLE
;
2588 update
|= NFP_NET_CFG_UPDATE_GEN
;
2589 update
|= NFP_NET_CFG_UPDATE_MSIX
;
2590 update
|= NFP_NET_CFG_UPDATE_RING
;
2591 if (nn
->cap
& NFP_NET_CFG_CTRL_RINGCFG
)
2592 new_ctrl
|= NFP_NET_CFG_CTRL_RINGCFG
;
2594 nn_writel(nn
, NFP_NET_CFG_CTRL
, new_ctrl
);
2595 err
= nfp_net_reconfig(nn
, update
);
2597 nfp_net_clear_config_and_disable(nn
);
2601 nn
->dp
.ctrl
= new_ctrl
;
2603 for (r
= 0; r
< nn
->dp
.num_rx_rings
; r
++)
2604 nfp_net_rx_ring_fill_freelist(&nn
->dp
, &nn
->dp
.rx_rings
[r
]);
2606 /* Since reconfiguration requests while NFP is down are ignored we
2607 * have to wipe the entire VXLAN configuration and reinitialize it.
2609 if (nn
->dp
.ctrl
& NFP_NET_CFG_CTRL_VXLAN
) {
2610 memset(&nn
->vxlan_ports
, 0, sizeof(nn
->vxlan_ports
));
2611 memset(&nn
->vxlan_usecnt
, 0, sizeof(nn
->vxlan_usecnt
));
2612 udp_tunnel_get_rx_info(nn
->dp
.netdev
);
2619 * nfp_net_close_stack() - Quiesce the stack (part of close)
2620 * @nn: NFP Net device to reconfigure
2622 static void nfp_net_close_stack(struct nfp_net
*nn
)
2626 disable_irq(nn
->irq_entries
[NFP_NET_IRQ_LSC_IDX
].vector
);
2627 netif_carrier_off(nn
->dp
.netdev
);
2628 nn
->link_up
= false;
2630 for (r
= 0; r
< nn
->dp
.num_r_vecs
; r
++) {
2631 disable_irq(nn
->r_vecs
[r
].irq_vector
);
2632 napi_disable(&nn
->r_vecs
[r
].napi
);
2635 netif_tx_disable(nn
->dp
.netdev
);
2639 * nfp_net_close_free_all() - Free all runtime resources
2640 * @nn: NFP Net device to reconfigure
2642 static void nfp_net_close_free_all(struct nfp_net
*nn
)
2646 nfp_net_tx_rings_free(&nn
->dp
);
2647 nfp_net_rx_rings_free(&nn
->dp
);
2649 for (r
= 0; r
< nn
->dp
.num_r_vecs
; r
++)
2650 nfp_net_cleanup_vector(nn
, &nn
->r_vecs
[r
]);
2652 nfp_net_aux_irq_free(nn
, NFP_NET_CFG_LSC
, NFP_NET_IRQ_LSC_IDX
);
2653 nfp_net_aux_irq_free(nn
, NFP_NET_CFG_EXN
, NFP_NET_IRQ_EXN_IDX
);
2657 * nfp_net_netdev_close() - Called when the device is downed
2658 * @netdev: netdev structure
2660 static int nfp_net_netdev_close(struct net_device
*netdev
)
2662 struct nfp_net
*nn
= netdev_priv(netdev
);
2664 /* Step 1: Disable RX and TX rings from the Linux kernel perspective
2666 nfp_net_close_stack(nn
);
2670 nfp_net_clear_config_and_disable(nn
);
2671 nfp_port_configure(netdev
, false);
2673 /* Step 3: Free resources
2675 nfp_net_close_free_all(nn
);
2677 nn_dbg(nn
, "%s down", netdev
->name
);
2681 void nfp_ctrl_close(struct nfp_net
*nn
)
2687 for (r
= 0; r
< nn
->dp
.num_r_vecs
; r
++) {
2688 disable_irq(nn
->r_vecs
[r
].irq_vector
);
2689 tasklet_disable(&nn
->r_vecs
[r
].tasklet
);
2692 nfp_net_clear_config_and_disable(nn
);
2694 nfp_net_close_free_all(nn
);
2700 * nfp_net_open_stack() - Start the device from stack's perspective
2701 * @nn: NFP Net device to reconfigure
2703 static void nfp_net_open_stack(struct nfp_net
*nn
)
2707 for (r
= 0; r
< nn
->dp
.num_r_vecs
; r
++) {
2708 napi_enable(&nn
->r_vecs
[r
].napi
);
2709 enable_irq(nn
->r_vecs
[r
].irq_vector
);
2712 netif_tx_wake_all_queues(nn
->dp
.netdev
);
2714 enable_irq(nn
->irq_entries
[NFP_NET_IRQ_LSC_IDX
].vector
);
2715 nfp_net_read_link_status(nn
);
2718 static int nfp_net_open_alloc_all(struct nfp_net
*nn
)
2722 err
= nfp_net_aux_irq_request(nn
, NFP_NET_CFG_EXN
, "%s-exn",
2723 nn
->exn_name
, sizeof(nn
->exn_name
),
2724 NFP_NET_IRQ_EXN_IDX
, nn
->exn_handler
);
2727 err
= nfp_net_aux_irq_request(nn
, NFP_NET_CFG_LSC
, "%s-lsc",
2728 nn
->lsc_name
, sizeof(nn
->lsc_name
),
2729 NFP_NET_IRQ_LSC_IDX
, nn
->lsc_handler
);
2732 disable_irq(nn
->irq_entries
[NFP_NET_IRQ_LSC_IDX
].vector
);
2734 for (r
= 0; r
< nn
->dp
.num_r_vecs
; r
++) {
2735 err
= nfp_net_prepare_vector(nn
, &nn
->r_vecs
[r
], r
);
2737 goto err_cleanup_vec_p
;
2740 err
= nfp_net_rx_rings_prepare(nn
, &nn
->dp
);
2742 goto err_cleanup_vec
;
2744 err
= nfp_net_tx_rings_prepare(nn
, &nn
->dp
);
2746 goto err_free_rx_rings
;
2748 for (r
= 0; r
< nn
->max_r_vecs
; r
++)
2749 nfp_net_vector_assign_rings(&nn
->dp
, &nn
->r_vecs
[r
], r
);
2754 nfp_net_rx_rings_free(&nn
->dp
);
2756 r
= nn
->dp
.num_r_vecs
;
2759 nfp_net_cleanup_vector(nn
, &nn
->r_vecs
[r
]);
2760 nfp_net_aux_irq_free(nn
, NFP_NET_CFG_LSC
, NFP_NET_IRQ_LSC_IDX
);
2762 nfp_net_aux_irq_free(nn
, NFP_NET_CFG_EXN
, NFP_NET_IRQ_EXN_IDX
);
2766 static int nfp_net_netdev_open(struct net_device
*netdev
)
2768 struct nfp_net
*nn
= netdev_priv(netdev
);
2771 /* Step 1: Allocate resources for rings and the like
2772 * - Request interrupts
2773 * - Allocate RX and TX ring resources
2774 * - Setup initial RSS table
2776 err
= nfp_net_open_alloc_all(nn
);
2780 err
= netif_set_real_num_tx_queues(netdev
, nn
->dp
.num_stack_tx_rings
);
2784 err
= netif_set_real_num_rx_queues(netdev
, nn
->dp
.num_rx_rings
);
2788 /* Step 2: Configure the NFP
2789 * - Ifup the physical interface if it exists
2790 * - Enable rings from 0 to tx_rings/rx_rings - 1.
2791 * - Write MAC address (in case it changed)
2793 * - Set the Freelist buffer size
2796 err
= nfp_port_configure(netdev
, true);
2800 err
= nfp_net_set_config_and_enable(nn
);
2802 goto err_port_disable
;
2804 /* Step 3: Enable for kernel
2805 * - put some freelist descriptors on each RX ring
2806 * - enable NAPI on each ring
2807 * - enable all TX queues
2810 nfp_net_open_stack(nn
);
2815 nfp_port_configure(netdev
, false);
2817 nfp_net_close_free_all(nn
);
2821 int nfp_ctrl_open(struct nfp_net
*nn
)
2825 /* ring dumping depends on vNICs being opened/closed under rtnl */
2828 err
= nfp_net_open_alloc_all(nn
);
2832 err
= nfp_net_set_config_and_enable(nn
);
2836 for (r
= 0; r
< nn
->dp
.num_r_vecs
; r
++)
2837 enable_irq(nn
->r_vecs
[r
].irq_vector
);
2844 nfp_net_close_free_all(nn
);
2850 static void nfp_net_set_rx_mode(struct net_device
*netdev
)
2852 struct nfp_net
*nn
= netdev_priv(netdev
);
2855 new_ctrl
= nn
->dp
.ctrl
;
2857 if (netdev
->flags
& IFF_PROMISC
) {
2858 if (nn
->cap
& NFP_NET_CFG_CTRL_PROMISC
)
2859 new_ctrl
|= NFP_NET_CFG_CTRL_PROMISC
;
2861 nn_warn(nn
, "FW does not support promiscuous mode\n");
2863 new_ctrl
&= ~NFP_NET_CFG_CTRL_PROMISC
;
2866 if (new_ctrl
== nn
->dp
.ctrl
)
2869 nn_writel(nn
, NFP_NET_CFG_CTRL
, new_ctrl
);
2870 nfp_net_reconfig_post(nn
, NFP_NET_CFG_UPDATE_GEN
);
2872 nn
->dp
.ctrl
= new_ctrl
;
2875 static void nfp_net_rss_init_itbl(struct nfp_net
*nn
)
2879 for (i
= 0; i
< sizeof(nn
->rss_itbl
); i
++)
2881 ethtool_rxfh_indir_default(i
, nn
->dp
.num_rx_rings
);
2884 static void nfp_net_dp_swap(struct nfp_net
*nn
, struct nfp_net_dp
*dp
)
2886 struct nfp_net_dp new_dp
= *dp
;
2891 nn
->dp
.netdev
->mtu
= new_dp
.mtu
;
2893 if (!netif_is_rxfh_configured(nn
->dp
.netdev
))
2894 nfp_net_rss_init_itbl(nn
);
2897 static int nfp_net_dp_swap_enable(struct nfp_net
*nn
, struct nfp_net_dp
*dp
)
2902 nfp_net_dp_swap(nn
, dp
);
2904 for (r
= 0; r
< nn
->max_r_vecs
; r
++)
2905 nfp_net_vector_assign_rings(&nn
->dp
, &nn
->r_vecs
[r
], r
);
2907 err
= netif_set_real_num_rx_queues(nn
->dp
.netdev
, nn
->dp
.num_rx_rings
);
2911 if (nn
->dp
.netdev
->real_num_tx_queues
!= nn
->dp
.num_stack_tx_rings
) {
2912 err
= netif_set_real_num_tx_queues(nn
->dp
.netdev
,
2913 nn
->dp
.num_stack_tx_rings
);
2918 return nfp_net_set_config_and_enable(nn
);
2921 struct nfp_net_dp
*nfp_net_clone_dp(struct nfp_net
*nn
)
2923 struct nfp_net_dp
*new;
2925 new = kmalloc(sizeof(*new), GFP_KERNEL
);
2931 /* Clear things which need to be recomputed */
2933 new->tx_rings
= NULL
;
2934 new->rx_rings
= NULL
;
2935 new->num_r_vecs
= 0;
2936 new->num_stack_tx_rings
= 0;
2942 nfp_net_check_config(struct nfp_net
*nn
, struct nfp_net_dp
*dp
,
2943 struct netlink_ext_ack
*extack
)
2945 /* XDP-enabled tests */
2948 if (dp
->fl_bufsz
> PAGE_SIZE
) {
2949 NL_SET_ERR_MSG_MOD(extack
, "MTU too large w/ XDP enabled");
2952 if (dp
->num_tx_rings
> nn
->max_tx_rings
) {
2953 NL_SET_ERR_MSG_MOD(extack
, "Insufficient number of TX rings w/ XDP enabled");
2960 int nfp_net_ring_reconfig(struct nfp_net
*nn
, struct nfp_net_dp
*dp
,
2961 struct netlink_ext_ack
*extack
)
2965 dp
->fl_bufsz
= nfp_net_calc_fl_bufsz(dp
);
2967 dp
->num_stack_tx_rings
= dp
->num_tx_rings
;
2969 dp
->num_stack_tx_rings
-= dp
->num_rx_rings
;
2971 dp
->num_r_vecs
= max(dp
->num_rx_rings
, dp
->num_stack_tx_rings
);
2973 err
= nfp_net_check_config(nn
, dp
, extack
);
2977 if (!netif_running(dp
->netdev
)) {
2978 nfp_net_dp_swap(nn
, dp
);
2983 /* Prepare new rings */
2984 for (r
= nn
->dp
.num_r_vecs
; r
< dp
->num_r_vecs
; r
++) {
2985 err
= nfp_net_prepare_vector(nn
, &nn
->r_vecs
[r
], r
);
2988 goto err_cleanup_vecs
;
2992 err
= nfp_net_rx_rings_prepare(nn
, dp
);
2994 goto err_cleanup_vecs
;
2996 err
= nfp_net_tx_rings_prepare(nn
, dp
);
3000 /* Stop device, swap in new rings, try to start the firmware */
3001 nfp_net_close_stack(nn
);
3002 nfp_net_clear_config_and_disable(nn
);
3004 err
= nfp_net_dp_swap_enable(nn
, dp
);
3008 nfp_net_clear_config_and_disable(nn
);
3010 /* Try with old configuration and old rings */
3011 err2
= nfp_net_dp_swap_enable(nn
, dp
);
3013 nn_err(nn
, "Can't restore ring config - FW communication failed (%d,%d)\n",
3016 for (r
= dp
->num_r_vecs
- 1; r
>= nn
->dp
.num_r_vecs
; r
--)
3017 nfp_net_cleanup_vector(nn
, &nn
->r_vecs
[r
]);
3019 nfp_net_rx_rings_free(dp
);
3020 nfp_net_tx_rings_free(dp
);
3022 nfp_net_open_stack(nn
);
3029 nfp_net_rx_rings_free(dp
);
3031 for (r
= dp
->num_r_vecs
- 1; r
>= nn
->dp
.num_r_vecs
; r
--)
3032 nfp_net_cleanup_vector(nn
, &nn
->r_vecs
[r
]);
3037 static int nfp_net_change_mtu(struct net_device
*netdev
, int new_mtu
)
3039 struct nfp_net
*nn
= netdev_priv(netdev
);
3040 struct nfp_net_dp
*dp
;
3042 dp
= nfp_net_clone_dp(nn
);
3048 return nfp_net_ring_reconfig(nn
, dp
, NULL
);
3052 nfp_net_vlan_rx_add_vid(struct net_device
*netdev
, __be16 proto
, u16 vid
)
3054 struct nfp_net
*nn
= netdev_priv(netdev
);
3056 /* Priority tagged packets with vlan id 0 are processed by the
3057 * NFP as untagged packets
3062 nn_writew(nn
, NFP_NET_CFG_VLAN_FILTER_VID
, vid
);
3063 nn_writew(nn
, NFP_NET_CFG_VLAN_FILTER_PROTO
, ETH_P_8021Q
);
3065 return nfp_net_reconfig_mbox(nn
, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD
);
3069 nfp_net_vlan_rx_kill_vid(struct net_device
*netdev
, __be16 proto
, u16 vid
)
3071 struct nfp_net
*nn
= netdev_priv(netdev
);
3073 /* Priority tagged packets with vlan id 0 are processed by the
3074 * NFP as untagged packets
3079 nn_writew(nn
, NFP_NET_CFG_VLAN_FILTER_VID
, vid
);
3080 nn_writew(nn
, NFP_NET_CFG_VLAN_FILTER_PROTO
, ETH_P_8021Q
);
3082 return nfp_net_reconfig_mbox(nn
, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL
);
3085 static void nfp_net_stat64(struct net_device
*netdev
,
3086 struct rtnl_link_stats64
*stats
)
3088 struct nfp_net
*nn
= netdev_priv(netdev
);
3091 for (r
= 0; r
< nn
->dp
.num_r_vecs
; r
++) {
3092 struct nfp_net_r_vector
*r_vec
= &nn
->r_vecs
[r
];
3097 start
= u64_stats_fetch_begin(&r_vec
->rx_sync
);
3098 data
[0] = r_vec
->rx_pkts
;
3099 data
[1] = r_vec
->rx_bytes
;
3100 data
[2] = r_vec
->rx_drops
;
3101 } while (u64_stats_fetch_retry(&r_vec
->rx_sync
, start
));
3102 stats
->rx_packets
+= data
[0];
3103 stats
->rx_bytes
+= data
[1];
3104 stats
->rx_dropped
+= data
[2];
3107 start
= u64_stats_fetch_begin(&r_vec
->tx_sync
);
3108 data
[0] = r_vec
->tx_pkts
;
3109 data
[1] = r_vec
->tx_bytes
;
3110 data
[2] = r_vec
->tx_errors
;
3111 } while (u64_stats_fetch_retry(&r_vec
->tx_sync
, start
));
3112 stats
->tx_packets
+= data
[0];
3113 stats
->tx_bytes
+= data
[1];
3114 stats
->tx_errors
+= data
[2];
3118 static int nfp_net_set_features(struct net_device
*netdev
,
3119 netdev_features_t features
)
3121 netdev_features_t changed
= netdev
->features
^ features
;
3122 struct nfp_net
*nn
= netdev_priv(netdev
);
3126 /* Assume this is not called with features we have not advertised */
3128 new_ctrl
= nn
->dp
.ctrl
;
3130 if (changed
& NETIF_F_RXCSUM
) {
3131 if (features
& NETIF_F_RXCSUM
)
3132 new_ctrl
|= nn
->cap
& NFP_NET_CFG_CTRL_RXCSUM_ANY
;
3134 new_ctrl
&= ~NFP_NET_CFG_CTRL_RXCSUM_ANY
;
3137 if (changed
& (NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
)) {
3138 if (features
& (NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
))
3139 new_ctrl
|= NFP_NET_CFG_CTRL_TXCSUM
;
3141 new_ctrl
&= ~NFP_NET_CFG_CTRL_TXCSUM
;
3144 if (changed
& (NETIF_F_TSO
| NETIF_F_TSO6
)) {
3145 if (features
& (NETIF_F_TSO
| NETIF_F_TSO6
))
3146 new_ctrl
|= nn
->cap
& NFP_NET_CFG_CTRL_LSO2
?:
3147 NFP_NET_CFG_CTRL_LSO
;
3149 new_ctrl
&= ~NFP_NET_CFG_CTRL_LSO_ANY
;
3152 if (changed
& NETIF_F_HW_VLAN_CTAG_RX
) {
3153 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
3154 new_ctrl
|= NFP_NET_CFG_CTRL_RXVLAN
;
3156 new_ctrl
&= ~NFP_NET_CFG_CTRL_RXVLAN
;
3159 if (changed
& NETIF_F_HW_VLAN_CTAG_TX
) {
3160 if (features
& NETIF_F_HW_VLAN_CTAG_TX
)
3161 new_ctrl
|= NFP_NET_CFG_CTRL_TXVLAN
;
3163 new_ctrl
&= ~NFP_NET_CFG_CTRL_TXVLAN
;
3166 if (changed
& NETIF_F_HW_VLAN_CTAG_FILTER
) {
3167 if (features
& NETIF_F_HW_VLAN_CTAG_FILTER
)
3168 new_ctrl
|= NFP_NET_CFG_CTRL_CTAG_FILTER
;
3170 new_ctrl
&= ~NFP_NET_CFG_CTRL_CTAG_FILTER
;
3173 if (changed
& NETIF_F_SG
) {
3174 if (features
& NETIF_F_SG
)
3175 new_ctrl
|= NFP_NET_CFG_CTRL_GATHER
;
3177 new_ctrl
&= ~NFP_NET_CFG_CTRL_GATHER
;
3180 if (changed
& NETIF_F_HW_TC
&& nfp_app_tc_busy(nn
->app
, nn
)) {
3181 nn_err(nn
, "Cannot disable HW TC offload while in use\n");
3185 nn_dbg(nn
, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
3186 netdev
->features
, features
, changed
);
3188 if (new_ctrl
== nn
->dp
.ctrl
)
3191 nn_dbg(nn
, "NIC ctrl: 0x%x -> 0x%x\n", nn
->dp
.ctrl
, new_ctrl
);
3192 nn_writel(nn
, NFP_NET_CFG_CTRL
, new_ctrl
);
3193 err
= nfp_net_reconfig(nn
, NFP_NET_CFG_UPDATE_GEN
);
3197 nn
->dp
.ctrl
= new_ctrl
;
3202 static netdev_features_t
3203 nfp_net_features_check(struct sk_buff
*skb
, struct net_device
*dev
,
3204 netdev_features_t features
)
3208 /* We can't do TSO over double tagged packets (802.1AD) */
3209 features
&= vlan_features_check(skb
, features
);
3211 if (!skb
->encapsulation
)
3214 /* Ensure that inner L4 header offset fits into TX descriptor field */
3215 if (skb_is_gso(skb
)) {
3218 hdrlen
= skb_inner_transport_header(skb
) - skb
->data
+
3219 inner_tcp_hdrlen(skb
);
3221 if (unlikely(hdrlen
> NFP_NET_LSO_MAX_HDR_SZ
))
3222 features
&= ~NETIF_F_GSO_MASK
;
3225 /* VXLAN/GRE check */
3226 switch (vlan_get_protocol(skb
)) {
3227 case htons(ETH_P_IP
):
3228 l4_hdr
= ip_hdr(skb
)->protocol
;
3230 case htons(ETH_P_IPV6
):
3231 l4_hdr
= ipv6_hdr(skb
)->nexthdr
;
3234 return features
& ~(NETIF_F_CSUM_MASK
| NETIF_F_GSO_MASK
);
3237 if (skb
->inner_protocol_type
!= ENCAP_TYPE_ETHER
||
3238 skb
->inner_protocol
!= htons(ETH_P_TEB
) ||
3239 (l4_hdr
!= IPPROTO_UDP
&& l4_hdr
!= IPPROTO_GRE
) ||
3240 (l4_hdr
== IPPROTO_UDP
&&
3241 (skb_inner_mac_header(skb
) - skb_transport_header(skb
) !=
3242 sizeof(struct udphdr
) + sizeof(struct vxlanhdr
))))
3243 return features
& ~(NETIF_F_CSUM_MASK
| NETIF_F_GSO_MASK
);
3249 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
3250 * @nn: NFP Net device to reconfigure
3251 * @idx: Index into the port table where new port should be written
3252 * @port: UDP port to configure (pass zero to remove VXLAN port)
3254 static void nfp_net_set_vxlan_port(struct nfp_net
*nn
, int idx
, __be16 port
)
3258 nn
->vxlan_ports
[idx
] = port
;
3260 if (!(nn
->dp
.ctrl
& NFP_NET_CFG_CTRL_VXLAN
))
3263 BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS
& 1);
3264 for (i
= 0; i
< NFP_NET_N_VXLAN_PORTS
; i
+= 2)
3265 nn_writel(nn
, NFP_NET_CFG_VXLAN_PORT
+ i
* sizeof(port
),
3266 be16_to_cpu(nn
->vxlan_ports
[i
+ 1]) << 16 |
3267 be16_to_cpu(nn
->vxlan_ports
[i
]));
3269 nfp_net_reconfig_post(nn
, NFP_NET_CFG_UPDATE_VXLAN
);
3273 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
3274 * @nn: NFP Network structure
3275 * @port: UDP port to look for
3277 * Return: if the port is already in the table -- it's position;
3278 * if the port is not in the table -- free position to use;
3279 * if the table is full -- -ENOSPC.
3281 static int nfp_net_find_vxlan_idx(struct nfp_net
*nn
, __be16 port
)
3283 int i
, free_idx
= -ENOSPC
;
3285 for (i
= 0; i
< NFP_NET_N_VXLAN_PORTS
; i
++) {
3286 if (nn
->vxlan_ports
[i
] == port
)
3288 if (!nn
->vxlan_usecnt
[i
])
3295 static void nfp_net_add_vxlan_port(struct net_device
*netdev
,
3296 struct udp_tunnel_info
*ti
)
3298 struct nfp_net
*nn
= netdev_priv(netdev
);
3301 if (ti
->type
!= UDP_TUNNEL_TYPE_VXLAN
)
3304 idx
= nfp_net_find_vxlan_idx(nn
, ti
->port
);
3308 if (!nn
->vxlan_usecnt
[idx
]++)
3309 nfp_net_set_vxlan_port(nn
, idx
, ti
->port
);
3312 static void nfp_net_del_vxlan_port(struct net_device
*netdev
,
3313 struct udp_tunnel_info
*ti
)
3315 struct nfp_net
*nn
= netdev_priv(netdev
);
3318 if (ti
->type
!= UDP_TUNNEL_TYPE_VXLAN
)
3321 idx
= nfp_net_find_vxlan_idx(nn
, ti
->port
);
3322 if (idx
== -ENOSPC
|| !nn
->vxlan_usecnt
[idx
])
3325 if (!--nn
->vxlan_usecnt
[idx
])
3326 nfp_net_set_vxlan_port(nn
, idx
, 0);
3330 nfp_net_xdp_setup_drv(struct nfp_net
*nn
, struct bpf_prog
*prog
,
3331 struct netlink_ext_ack
*extack
)
3333 struct nfp_net_dp
*dp
;
3335 if (!prog
== !nn
->dp
.xdp_prog
) {
3336 WRITE_ONCE(nn
->dp
.xdp_prog
, prog
);
3340 dp
= nfp_net_clone_dp(nn
);
3344 dp
->xdp_prog
= prog
;
3345 dp
->num_tx_rings
+= prog
? nn
->dp
.num_rx_rings
: -nn
->dp
.num_rx_rings
;
3346 dp
->rx_dma_dir
= prog
? DMA_BIDIRECTIONAL
: DMA_FROM_DEVICE
;
3347 dp
->rx_dma_off
= prog
? XDP_PACKET_HEADROOM
- nn
->dp
.rx_offset
: 0;
3349 /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
3350 return nfp_net_ring_reconfig(nn
, dp
, extack
);
3354 nfp_net_xdp_setup(struct nfp_net
*nn
, struct bpf_prog
*prog
, u32 flags
,
3355 struct netlink_ext_ack
*extack
)
3357 struct bpf_prog
*drv_prog
, *offload_prog
;
3360 if (nn
->xdp_prog
&& (flags
^ nn
->xdp_flags
) & XDP_FLAGS_MODES
)
3363 /* Load both when no flags set to allow easy activation of driver path
3364 * when program is replaced by one which can't be offloaded.
3366 drv_prog
= flags
& XDP_FLAGS_HW_MODE
? NULL
: prog
;
3367 offload_prog
= flags
& XDP_FLAGS_DRV_MODE
? NULL
: prog
;
3369 err
= nfp_net_xdp_setup_drv(nn
, drv_prog
, extack
);
3373 err
= nfp_app_xdp_offload(nn
->app
, nn
, offload_prog
);
3374 if (err
&& flags
& XDP_FLAGS_HW_MODE
)
3378 bpf_prog_put(nn
->xdp_prog
);
3379 nn
->xdp_prog
= prog
;
3380 nn
->xdp_flags
= flags
;
3385 static int nfp_net_xdp(struct net_device
*netdev
, struct netdev_xdp
*xdp
)
3387 struct nfp_net
*nn
= netdev_priv(netdev
);
3389 switch (xdp
->command
) {
3390 case XDP_SETUP_PROG
:
3391 case XDP_SETUP_PROG_HW
:
3392 return nfp_net_xdp_setup(nn
, xdp
->prog
, xdp
->flags
,
3394 case XDP_QUERY_PROG
:
3395 xdp
->prog_attached
= !!nn
->xdp_prog
;
3396 if (nn
->dp
.bpf_offload_xdp
)
3397 xdp
->prog_attached
= XDP_ATTACHED_HW
;
3398 xdp
->prog_id
= nn
->xdp_prog
? nn
->xdp_prog
->aux
->id
: 0;
3405 static int nfp_net_set_mac_address(struct net_device
*netdev
, void *addr
)
3407 struct nfp_net
*nn
= netdev_priv(netdev
);
3408 struct sockaddr
*saddr
= addr
;
3411 err
= eth_prepare_mac_addr_change(netdev
, addr
);
3415 nfp_net_write_mac_addr(nn
, saddr
->sa_data
);
3417 err
= nfp_net_reconfig(nn
, NFP_NET_CFG_UPDATE_MACADDR
);
3421 eth_commit_mac_addr_change(netdev
, addr
);
3426 const struct net_device_ops nfp_net_netdev_ops
= {
3427 .ndo_open
= nfp_net_netdev_open
,
3428 .ndo_stop
= nfp_net_netdev_close
,
3429 .ndo_start_xmit
= nfp_net_tx
,
3430 .ndo_get_stats64
= nfp_net_stat64
,
3431 .ndo_vlan_rx_add_vid
= nfp_net_vlan_rx_add_vid
,
3432 .ndo_vlan_rx_kill_vid
= nfp_net_vlan_rx_kill_vid
,
3433 .ndo_set_vf_mac
= nfp_app_set_vf_mac
,
3434 .ndo_set_vf_vlan
= nfp_app_set_vf_vlan
,
3435 .ndo_set_vf_spoofchk
= nfp_app_set_vf_spoofchk
,
3436 .ndo_get_vf_config
= nfp_app_get_vf_config
,
3437 .ndo_set_vf_link_state
= nfp_app_set_vf_link_state
,
3438 .ndo_setup_tc
= nfp_port_setup_tc
,
3439 .ndo_tx_timeout
= nfp_net_tx_timeout
,
3440 .ndo_set_rx_mode
= nfp_net_set_rx_mode
,
3441 .ndo_change_mtu
= nfp_net_change_mtu
,
3442 .ndo_set_mac_address
= nfp_net_set_mac_address
,
3443 .ndo_set_features
= nfp_net_set_features
,
3444 .ndo_features_check
= nfp_net_features_check
,
3445 .ndo_get_phys_port_name
= nfp_port_get_phys_port_name
,
3446 .ndo_udp_tunnel_add
= nfp_net_add_vxlan_port
,
3447 .ndo_udp_tunnel_del
= nfp_net_del_vxlan_port
,
3448 .ndo_xdp
= nfp_net_xdp
,
3452 * nfp_net_info() - Print general info about the NIC
3453 * @nn: NFP Net device to reconfigure
3455 void nfp_net_info(struct nfp_net
*nn
)
3457 nn_info(nn
, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
3458 nn
->dp
.is_vf
? "VF " : "",
3459 nn
->dp
.num_tx_rings
, nn
->max_tx_rings
,
3460 nn
->dp
.num_rx_rings
, nn
->max_rx_rings
);
3461 nn_info(nn
, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
3462 nn
->fw_ver
.resv
, nn
->fw_ver
.class,
3463 nn
->fw_ver
.major
, nn
->fw_ver
.minor
,
3465 nn_info(nn
, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
3467 nn
->cap
& NFP_NET_CFG_CTRL_PROMISC
? "PROMISC " : "",
3468 nn
->cap
& NFP_NET_CFG_CTRL_L2BC
? "L2BCFILT " : "",
3469 nn
->cap
& NFP_NET_CFG_CTRL_L2MC
? "L2MCFILT " : "",
3470 nn
->cap
& NFP_NET_CFG_CTRL_RXCSUM
? "RXCSUM " : "",
3471 nn
->cap
& NFP_NET_CFG_CTRL_TXCSUM
? "TXCSUM " : "",
3472 nn
->cap
& NFP_NET_CFG_CTRL_RXVLAN
? "RXVLAN " : "",
3473 nn
->cap
& NFP_NET_CFG_CTRL_TXVLAN
? "TXVLAN " : "",
3474 nn
->cap
& NFP_NET_CFG_CTRL_SCATTER
? "SCATTER " : "",
3475 nn
->cap
& NFP_NET_CFG_CTRL_GATHER
? "GATHER " : "",
3476 nn
->cap
& NFP_NET_CFG_CTRL_LSO
? "TSO1 " : "",
3477 nn
->cap
& NFP_NET_CFG_CTRL_LSO2
? "TSO2 " : "",
3478 nn
->cap
& NFP_NET_CFG_CTRL_RSS
? "RSS1 " : "",
3479 nn
->cap
& NFP_NET_CFG_CTRL_RSS2
? "RSS2 " : "",
3480 nn
->cap
& NFP_NET_CFG_CTRL_CTAG_FILTER
? "CTAG_FILTER " : "",
3481 nn
->cap
& NFP_NET_CFG_CTRL_L2SWITCH
? "L2SWITCH " : "",
3482 nn
->cap
& NFP_NET_CFG_CTRL_MSIXAUTO
? "AUTOMASK " : "",
3483 nn
->cap
& NFP_NET_CFG_CTRL_IRQMOD
? "IRQMOD " : "",
3484 nn
->cap
& NFP_NET_CFG_CTRL_VXLAN
? "VXLAN " : "",
3485 nn
->cap
& NFP_NET_CFG_CTRL_NVGRE
? "NVGRE " : "",
3486 nn
->cap
& NFP_NET_CFG_CTRL_CSUM_COMPLETE
?
3487 "RXCSUM_COMPLETE " : "",
3488 nn
->cap
& NFP_NET_CFG_CTRL_LIVE_ADDR
? "LIVE_ADDR " : "",
3489 nfp_app_extra_cap(nn
->app
, nn
));
3493 * nfp_net_alloc() - Allocate netdev and related structure
3495 * @needs_netdev: Whether to allocate a netdev for this vNIC
3496 * @max_tx_rings: Maximum number of TX rings supported by device
3497 * @max_rx_rings: Maximum number of RX rings supported by device
3499 * This function allocates a netdev device and fills in the initial
3500 * part of the @struct nfp_net structure. In case of control device
3501 * nfp_net structure is allocated without the netdev.
3503 * Return: NFP Net device structure, or ERR_PTR on error.
3505 struct nfp_net
*nfp_net_alloc(struct pci_dev
*pdev
, bool needs_netdev
,
3506 unsigned int max_tx_rings
,
3507 unsigned int max_rx_rings
)
3512 struct net_device
*netdev
;
3514 netdev
= alloc_etherdev_mqs(sizeof(struct nfp_net
),
3515 max_tx_rings
, max_rx_rings
);
3517 return ERR_PTR(-ENOMEM
);
3519 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
3520 nn
= netdev_priv(netdev
);
3521 nn
->dp
.netdev
= netdev
;
3523 nn
= vzalloc(sizeof(*nn
));
3525 return ERR_PTR(-ENOMEM
);
3528 nn
->dp
.dev
= &pdev
->dev
;
3531 nn
->max_tx_rings
= max_tx_rings
;
3532 nn
->max_rx_rings
= max_rx_rings
;
3534 nn
->dp
.num_tx_rings
= min_t(unsigned int,
3535 max_tx_rings
, num_online_cpus());
3536 nn
->dp
.num_rx_rings
= min_t(unsigned int, max_rx_rings
,
3537 netif_get_num_default_rss_queues());
3539 nn
->dp
.num_r_vecs
= max(nn
->dp
.num_tx_rings
, nn
->dp
.num_rx_rings
);
3540 nn
->dp
.num_r_vecs
= min_t(unsigned int,
3541 nn
->dp
.num_r_vecs
, num_online_cpus());
3543 nn
->dp
.txd_cnt
= NFP_NET_TX_DESCS_DEFAULT
;
3544 nn
->dp
.rxd_cnt
= NFP_NET_RX_DESCS_DEFAULT
;
3546 spin_lock_init(&nn
->reconfig_lock
);
3547 spin_lock_init(&nn
->link_status_lock
);
3549 setup_timer(&nn
->reconfig_timer
,
3550 nfp_net_reconfig_timer
, (unsigned long)nn
);
3556 * nfp_net_free() - Undo what @nfp_net_alloc() did
3557 * @nn: NFP Net device to reconfigure
3559 void nfp_net_free(struct nfp_net
*nn
)
3562 bpf_prog_put(nn
->xdp_prog
);
3565 free_netdev(nn
->dp
.netdev
);
3571 * nfp_net_rss_key_sz() - Get current size of the RSS key
3572 * @nn: NFP Net device instance
3574 * Return: size of the RSS key for currently selected hash function.
3576 unsigned int nfp_net_rss_key_sz(struct nfp_net
*nn
)
3578 switch (nn
->rss_hfunc
) {
3579 case ETH_RSS_HASH_TOP
:
3580 return NFP_NET_CFG_RSS_KEY_SZ
;
3581 case ETH_RSS_HASH_XOR
:
3583 case ETH_RSS_HASH_CRC32
:
3587 nn_warn(nn
, "Unknown hash function: %u\n", nn
->rss_hfunc
);
3592 * nfp_net_rss_init() - Set the initial RSS parameters
3593 * @nn: NFP Net device to reconfigure
3595 static void nfp_net_rss_init(struct nfp_net
*nn
)
3597 unsigned long func_bit
, rss_cap_hfunc
;
3600 /* Read the RSS function capability and select first supported func */
3601 reg
= nn_readl(nn
, NFP_NET_CFG_RSS_CAP
);
3602 rss_cap_hfunc
= FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC
, reg
);
3604 rss_cap_hfunc
= FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC
,
3605 NFP_NET_CFG_RSS_TOEPLITZ
);
3607 func_bit
= find_first_bit(&rss_cap_hfunc
, NFP_NET_CFG_RSS_HFUNCS
);
3608 if (func_bit
== NFP_NET_CFG_RSS_HFUNCS
) {
3609 dev_warn(nn
->dp
.dev
,
3610 "Bad RSS config, defaulting to Toeplitz hash\n");
3611 func_bit
= ETH_RSS_HASH_TOP_BIT
;
3613 nn
->rss_hfunc
= 1 << func_bit
;
3615 netdev_rss_key_fill(nn
->rss_key
, nfp_net_rss_key_sz(nn
));
3617 nfp_net_rss_init_itbl(nn
);
3619 /* Enable IPv4/IPv6 TCP by default */
3620 nn
->rss_cfg
= NFP_NET_CFG_RSS_IPV4_TCP
|
3621 NFP_NET_CFG_RSS_IPV6_TCP
|
3622 FIELD_PREP(NFP_NET_CFG_RSS_HFUNC
, nn
->rss_hfunc
) |
3623 NFP_NET_CFG_RSS_MASK
;
3627 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
3628 * @nn: NFP Net device to reconfigure
3630 static void nfp_net_irqmod_init(struct nfp_net
*nn
)
3632 nn
->rx_coalesce_usecs
= 50;
3633 nn
->rx_coalesce_max_frames
= 64;
3634 nn
->tx_coalesce_usecs
= 50;
3635 nn
->tx_coalesce_max_frames
= 64;
3638 static void nfp_net_netdev_init(struct nfp_net
*nn
)
3640 struct net_device
*netdev
= nn
->dp
.netdev
;
3642 nfp_net_write_mac_addr(nn
, nn
->dp
.netdev
->dev_addr
);
3644 netdev
->mtu
= nn
->dp
.mtu
;
3646 /* Advertise/enable offloads based on capabilities
3648 * Note: netdev->features show the currently enabled features
3649 * and netdev->hw_features advertises which features are
3650 * supported. By default we enable most features.
3652 if (nn
->cap
& NFP_NET_CFG_CTRL_LIVE_ADDR
)
3653 netdev
->priv_flags
|= IFF_LIVE_ADDR_CHANGE
;
3655 netdev
->hw_features
= NETIF_F_HIGHDMA
;
3656 if (nn
->cap
& NFP_NET_CFG_CTRL_RXCSUM_ANY
) {
3657 netdev
->hw_features
|= NETIF_F_RXCSUM
;
3658 nn
->dp
.ctrl
|= nn
->cap
& NFP_NET_CFG_CTRL_RXCSUM_ANY
;
3660 if (nn
->cap
& NFP_NET_CFG_CTRL_TXCSUM
) {
3661 netdev
->hw_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
;
3662 nn
->dp
.ctrl
|= NFP_NET_CFG_CTRL_TXCSUM
;
3664 if (nn
->cap
& NFP_NET_CFG_CTRL_GATHER
) {
3665 netdev
->hw_features
|= NETIF_F_SG
;
3666 nn
->dp
.ctrl
|= NFP_NET_CFG_CTRL_GATHER
;
3668 if ((nn
->cap
& NFP_NET_CFG_CTRL_LSO
&& nn
->fw_ver
.major
> 2) ||
3669 nn
->cap
& NFP_NET_CFG_CTRL_LSO2
) {
3670 netdev
->hw_features
|= NETIF_F_TSO
| NETIF_F_TSO6
;
3671 nn
->dp
.ctrl
|= nn
->cap
& NFP_NET_CFG_CTRL_LSO2
?:
3672 NFP_NET_CFG_CTRL_LSO
;
3674 if (nn
->cap
& NFP_NET_CFG_CTRL_RSS_ANY
)
3675 netdev
->hw_features
|= NETIF_F_RXHASH
;
3676 if (nn
->cap
& NFP_NET_CFG_CTRL_VXLAN
&&
3677 nn
->cap
& NFP_NET_CFG_CTRL_NVGRE
) {
3678 if (nn
->cap
& NFP_NET_CFG_CTRL_LSO
)
3679 netdev
->hw_features
|= NETIF_F_GSO_GRE
|
3680 NETIF_F_GSO_UDP_TUNNEL
;
3681 nn
->dp
.ctrl
|= NFP_NET_CFG_CTRL_VXLAN
| NFP_NET_CFG_CTRL_NVGRE
;
3683 netdev
->hw_enc_features
= netdev
->hw_features
;
3686 netdev
->vlan_features
= netdev
->hw_features
;
3688 if (nn
->cap
& NFP_NET_CFG_CTRL_RXVLAN
) {
3689 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_RX
;
3690 nn
->dp
.ctrl
|= NFP_NET_CFG_CTRL_RXVLAN
;
3692 if (nn
->cap
& NFP_NET_CFG_CTRL_TXVLAN
) {
3693 if (nn
->cap
& NFP_NET_CFG_CTRL_LSO2
) {
3694 nn_warn(nn
, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
3696 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_TX
;
3697 nn
->dp
.ctrl
|= NFP_NET_CFG_CTRL_TXVLAN
;
3700 if (nn
->cap
& NFP_NET_CFG_CTRL_CTAG_FILTER
) {
3701 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_FILTER
;
3702 nn
->dp
.ctrl
|= NFP_NET_CFG_CTRL_CTAG_FILTER
;
3705 netdev
->features
= netdev
->hw_features
;
3707 if (nfp_app_has_tc(nn
->app
))
3708 netdev
->hw_features
|= NETIF_F_HW_TC
;
3710 /* Advertise but disable TSO by default. */
3711 netdev
->features
&= ~(NETIF_F_TSO
| NETIF_F_TSO6
);
3712 nn
->dp
.ctrl
&= ~NFP_NET_CFG_CTRL_LSO_ANY
;
3714 /* Finalise the netdev setup */
3715 netdev
->netdev_ops
= &nfp_net_netdev_ops
;
3716 netdev
->watchdog_timeo
= msecs_to_jiffies(5 * 1000);
3718 SWITCHDEV_SET_OPS(netdev
, &nfp_port_switchdev_ops
);
3720 /* MTU range: 68 - hw-specific max */
3721 netdev
->min_mtu
= ETH_MIN_MTU
;
3722 netdev
->max_mtu
= nn
->max_mtu
;
3724 netif_carrier_off(netdev
);
3726 nfp_net_set_ethtool_ops(netdev
);
3730 * nfp_net_init() - Initialise/finalise the nfp_net structure
3731 * @nn: NFP Net device structure
3733 * Return: 0 on success or negative errno on error.
3735 int nfp_net_init(struct nfp_net
*nn
)
3739 nn
->dp
.rx_dma_dir
= DMA_FROM_DEVICE
;
3741 /* Get some of the read-only fields from the BAR */
3742 nn
->cap
= nn_readl(nn
, NFP_NET_CFG_CAP
);
3743 nn
->max_mtu
= nn_readl(nn
, NFP_NET_CFG_MAX_MTU
);
3745 /* ABI 4.x and ctrl vNIC always use chained metadata, in other cases
3746 * we allow use of non-chained metadata if RSS(v1) is the only
3747 * advertised capability requiring metadata.
3749 nn
->dp
.chained_metadata_format
= nn
->fw_ver
.major
== 4 ||
3751 !(nn
->cap
& NFP_NET_CFG_CTRL_RSS
) ||
3752 nn
->cap
& NFP_NET_CFG_CTRL_CHAIN_META
;
3753 /* RSS(v1) uses non-chained metadata format, except in ABI 4.x where
3754 * it has the same meaning as RSSv2.
3756 if (nn
->dp
.chained_metadata_format
&& nn
->fw_ver
.major
!= 4)
3757 nn
->cap
&= ~NFP_NET_CFG_CTRL_RSS
;
3759 /* Determine RX packet/metadata boundary offset */
3760 if (nn
->fw_ver
.major
>= 2) {
3763 reg
= nn_readl(nn
, NFP_NET_CFG_RX_OFFSET
);
3764 if (reg
> NFP_NET_MAX_PREPEND
) {
3765 nn_err(nn
, "Invalid rx offset: %d\n", reg
);
3768 nn
->dp
.rx_offset
= reg
;
3770 nn
->dp
.rx_offset
= NFP_NET_RX_OFFSET
;
3773 /* Set default MTU and Freelist buffer size */
3774 if (nn
->max_mtu
< NFP_NET_DEFAULT_MTU
)
3775 nn
->dp
.mtu
= nn
->max_mtu
;
3777 nn
->dp
.mtu
= NFP_NET_DEFAULT_MTU
;
3778 nn
->dp
.fl_bufsz
= nfp_net_calc_fl_bufsz(&nn
->dp
);
3780 if (nn
->cap
& NFP_NET_CFG_CTRL_RSS_ANY
) {
3781 nfp_net_rss_init(nn
);
3782 nn
->dp
.ctrl
|= nn
->cap
& NFP_NET_CFG_CTRL_RSS2
?:
3783 NFP_NET_CFG_CTRL_RSS
;
3786 /* Allow L2 Broadcast and Multicast through by default, if supported */
3787 if (nn
->cap
& NFP_NET_CFG_CTRL_L2BC
)
3788 nn
->dp
.ctrl
|= NFP_NET_CFG_CTRL_L2BC
;
3789 if (nn
->cap
& NFP_NET_CFG_CTRL_L2MC
)
3790 nn
->dp
.ctrl
|= NFP_NET_CFG_CTRL_L2MC
;
3792 /* Allow IRQ moderation, if supported */
3793 if (nn
->cap
& NFP_NET_CFG_CTRL_IRQMOD
) {
3794 nfp_net_irqmod_init(nn
);
3795 nn
->dp
.ctrl
|= NFP_NET_CFG_CTRL_IRQMOD
;
3799 nfp_net_netdev_init(nn
);
3801 /* Stash the re-configuration queue away. First odd queue in TX Bar */
3802 nn
->qcp_cfg
= nn
->tx_bar
+ NFP_QCP_QUEUE_ADDR_SZ
;
3804 /* Make sure the FW knows the netdev is supposed to be disabled here */
3805 nn_writel(nn
, NFP_NET_CFG_CTRL
, 0);
3806 nn_writeq(nn
, NFP_NET_CFG_TXRS_ENABLE
, 0);
3807 nn_writeq(nn
, NFP_NET_CFG_RXRS_ENABLE
, 0);
3808 err
= nfp_net_reconfig(nn
, NFP_NET_CFG_UPDATE_RING
|
3809 NFP_NET_CFG_UPDATE_GEN
);
3813 nfp_net_vecs_init(nn
);
3817 return register_netdev(nn
->dp
.netdev
);
3821 * nfp_net_clean() - Undo what nfp_net_init() did.
3822 * @nn: NFP Net device structure
3824 void nfp_net_clean(struct nfp_net
*nn
)
3829 unregister_netdev(nn
->dp
.netdev
);