drivers/net/ethernet/intel/i40e/i40e_main.c
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
		"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 1
#define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) DRV_KERN
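
/* Editorial note (not from the original source): with the values above,
 * the __stringify() chain concatenates "2" "." "1" "." "14" "-k", so
 * DRV_VERSION expands to the single string literal "2.1.14-k".
 */
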
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a few forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
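
/* Usage sketch (illustrative, not part of this file): a typical caller
 * pairs these helpers to reserve and later release a block of queue
 * pairs for a VSI, e.g.:
 *
 *	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
 *			    vsi->idx);
 *	if (ret < 0)
 *		return ret;
 *	vsi->base_queue = ret;
 *	...
 *	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
 *
 * pf->qp_pile and vsi->idx are assumed here to match how the rest of
 * the driver tracks queue-pair ownership.
 */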

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes += bytes;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: statistics entry to be updated
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;
		i40e_get_netdev_stats_struct_tx(tx_ring, stats);

		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;

		if (i40e_enabled_xdp_vsi(vsi))
			i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
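
/* Worked example (illustrative): suppose *offset was latched at
 * 0xFFFFFFFFF000 and the 48-bit counter has since wrapped, so the next
 * raw read returns 0x100. Then new_data < *offset, and the code above
 * computes
 *
 *	*stat = (0x100 + BIT_ULL(48)) - 0xFFFFFFFFF000 = 0x1100
 *
 * so the reported stat keeps counting monotonically across the hardware
 * roll-over before being masked back to 48 bits.
 */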

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ANY.  We could check the table each
	 * time, incurring search cost repeatedly.  However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}

/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
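
/* Scenario sketch (illustrative): a VSI holding two filters
 * {MAC A, VLAN -1} and {MAC B, VLAN -1} that gains its first VLAN
 * filter (vlan_filters becomes nonzero) has both rewritten to VLAN 0
 * by the loop above, so they match only untagged frames; when the
 * last VLAN filter is later removed, the same loop rewrites them
 * back to VLAN -1 and all traffic, tagged or untagged, matches again.
 */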

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		/* If we're in overflow promisc mode, set the state directly
		 * to failed, so we don't bother to try sending the filter
		 * to the hardware.
		 */
		if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
			f->state = I40E_FILTER_FAILED;
		else
			f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
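
/* Usage sketch (illustrative): callers serialize against .set_rx_mode
 * by taking the filter hash lock around add/del, as i40e_set_mac()
 * does below, e.g.:
 *
 *	spin_lock_bh(&vsi->mac_filter_hash_lock);
 *	f = i40e_add_filter(vsi, mac, I40E_VLAN_ANY);
 *	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 *	if (!f)
 *		return -ENOMEM;
 */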

/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}

/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}

/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}

/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
	     "Missing mac_filter_hash_lock\n");
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode(). If we copy after changing the address in the filter
	 * list, we might open ourselves to a narrow race window where
	 * .set_rx_mode could delete our dev_addr filter and prevent traffic
	 * from passing.
	 */
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	i40e_add_mac_filter(vsi, addr->sa_data);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	qcount = vsi->alloc_queue_pairs;

	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
				cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
1716
1717 /**
1718 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1719 * @netdev: the netdevice
1720 * @addr: address to add
1721 *
1722 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1723 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1724 */
1725 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1726 {
1727 struct i40e_netdev_priv *np = netdev_priv(netdev);
1728 struct i40e_vsi *vsi = np->vsi;
1729
1730 if (i40e_add_mac_filter(vsi, addr))
1731 return 0;
1732 else
1733 return -ENOMEM;
1734 }
1735
1736 /**
1737 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1738 * @netdev: the netdevice
1739  * @addr: address to remove
1740 *
1741 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1742 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1743 */
1744 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1745 {
1746 struct i40e_netdev_priv *np = netdev_priv(netdev);
1747 struct i40e_vsi *vsi = np->vsi;
1748
1749 /* Under some circumstances, we might receive a request to delete
1750 * our own device address from our uc list. Because we store the
1751 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1752 * such requests and not delete our device address from this list.
1753 */
1754 if (ether_addr_equal(addr, netdev->dev_addr))
1755 return 0;
1756
1757 i40e_del_mac_filter(vsi, addr);
1758
1759 return 0;
1760 }
1761
1762 /**
1763 * i40e_set_rx_mode - NDO callback to set the netdev filters
1764 * @netdev: network interface device structure
1765 **/
1766 static void i40e_set_rx_mode(struct net_device *netdev)
1767 {
1768 struct i40e_netdev_priv *np = netdev_priv(netdev);
1769 struct i40e_vsi *vsi = np->vsi;
1770
1771 spin_lock_bh(&vsi->mac_filter_hash_lock);
1772
1773 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1774 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1775
1776 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1777
1778 /* check for other flag changes */
1779 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1780 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1781 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1782 }
1783
1784 /* schedule our worker thread which will take care of
1785 * applying the new filter changes
1786 */
1787 i40e_service_event_schedule(vsi->back);
1788 }
1789
1790 /**
1791 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1792 * @vsi: Pointer to VSI struct
1793 * @from: Pointer to list which contains MAC filter entries - changes to
1794  *        those entries need to be undone.
1795 *
1796 * MAC filter entries from this list were slated for deletion.
1797 **/
1798 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1799 struct hlist_head *from)
1800 {
1801 struct i40e_mac_filter *f;
1802 struct hlist_node *h;
1803
1804 hlist_for_each_entry_safe(f, h, from, hlist) {
1805 u64 key = i40e_addr_to_hkey(f->macaddr);
1806
1807 		/* Move the element back into MAC filter list */
1808 hlist_del(&f->hlist);
1809 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1810 }
1811 }
1812
1813 /**
1814 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
1815 * @vsi: Pointer to vsi struct
1816 * @from: Pointer to list which contains MAC filter entries - changes to
1817  *        those entries need to be undone.
1818 *
1819 * MAC filter entries from this list were slated for addition.
1820 **/
1821 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1822 struct hlist_head *from)
1823 {
1824 struct i40e_new_mac_filter *new;
1825 struct hlist_node *h;
1826
1827 hlist_for_each_entry_safe(new, h, from, hlist) {
1828 /* We can simply free the wrapper structure */
1829 hlist_del(&new->hlist);
1830 kfree(new);
1831 }
1832 }
1833
1834 /**
1835  * i40e_next_filter - Get the next non-broadcast filter from a list
1836 * @next: pointer to filter in list
1837 *
1838 * Returns the next non-broadcast filter in the list. Required so that we
1839 * ignore broadcast filters within the list, since these are not handled via
1840 * the normal firmware update path.
1841 */
1842 static
1843 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
1844 {
1845 hlist_for_each_entry_continue(next, hlist) {
1846 if (!is_broadcast_ether_addr(next->f->macaddr))
1847 return next;
1848 }
1849
1850 return NULL;
1851 }
1852
1853 /**
1854 * i40e_update_filter_state - Update filter state based on return data
1855 * from firmware
1856 * @count: Number of filters added
1857 * @add_list: return data from fw
1858  * @add_head: pointer to first filter in current batch
1859 *
1860 * MAC filter entries from list were slated to be added to device. Returns
1861  * the number of successful filters. Note that 0 does NOT mean success!
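 *
 * For example, if firmware accepted only two of three submitted filters,
 * the two accepted entries are marked I40E_FILTER_ACTIVE, the rejected
 * entry is marked I40E_FILTER_FAILED, and the return value is 2.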
1862 **/
1863 static int
1864 i40e_update_filter_state(int count,
1865 struct i40e_aqc_add_macvlan_element_data *add_list,
1866 struct i40e_new_mac_filter *add_head)
1867 {
1868 int retval = 0;
1869 int i;
1870
1871 for (i = 0; i < count; i++) {
1872 /* Always check status of each filter. We don't need to check
1873 * the firmware return status because we pre-set the filter
1874 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
1875 * request to the adminq. Thus, if it no longer matches then
1876 * we know the filter is active.
1877 */
1878 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
1879 add_head->state = I40E_FILTER_FAILED;
1880 } else {
1881 add_head->state = I40E_FILTER_ACTIVE;
1882 retval++;
1883 }
1884
1885 add_head = i40e_next_filter(add_head);
1886 if (!add_head)
1887 break;
1888 }
1889
1890 return retval;
1891 }
1892
1893 /**
1894 * i40e_aqc_del_filters - Request firmware to delete a set of filters
1895 * @vsi: ptr to the VSI
1896 * @vsi_name: name to display in messages
1897 * @list: the list of filters to send to firmware
1898 * @num_del: the number of filters to delete
1899 * @retval: Set to -EIO on failure to delete
1900 *
1901 * Send a request to firmware via AdminQ to delete a set of filters. Uses
1902  * *retval instead of a return value so that success does not force *retval
1903  * to be set to 0. This ensures that a sequence of calls to this function
1904  * preserves the previous value of *retval on successful delete.
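 *
 * e.g. during a batched flush, an earlier call that set *retval to -EIO is
 * not clobbered back to 0 by a later call that succeeds.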
1905 */
1906 static
1907 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
1908 struct i40e_aqc_remove_macvlan_element_data *list,
1909 int num_del, int *retval)
1910 {
1911 struct i40e_hw *hw = &vsi->back->hw;
1912 i40e_status aq_ret;
1913 int aq_err;
1914
1915 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
1916 aq_err = hw->aq.asq_last_status;
1917
1918 /* Explicitly ignore and do not report when firmware returns ENOENT */
1919 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
1920 *retval = -EIO;
1921 dev_info(&vsi->back->pdev->dev,
1922 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
1923 vsi_name, i40e_stat_str(hw, aq_ret),
1924 i40e_aq_str(hw, aq_err));
1925 }
1926 }
1927
1928 /**
1929 * i40e_aqc_add_filters - Request firmware to add a set of filters
1930 * @vsi: ptr to the VSI
1931 * @vsi_name: name to display in messages
1932 * @list: the list of filters to send to firmware
1933 * @add_head: Position in the add hlist
1934 * @num_add: the number of filters to add
1935  * @promisc_changed: set to true on exit if promiscuous mode was forced on
1936 *
1937 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
1938 * promisc_changed to true if the firmware has run out of space for more
1939 * filters.
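 *
 * e.g. if firmware accepts only some of the num_add entries, fewer filters
 * than requested come back active and the VSI is forced into overflow
 * promiscuous mode.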
1940 */
1941 static
1942 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
1943 struct i40e_aqc_add_macvlan_element_data *list,
1944 struct i40e_new_mac_filter *add_head,
1945 int num_add, bool *promisc_changed)
1946 {
1947 struct i40e_hw *hw = &vsi->back->hw;
1948 int aq_err, fcnt;
1949
1950 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
1951 aq_err = hw->aq.asq_last_status;
1952 fcnt = i40e_update_filter_state(num_add, list, add_head);
1953
1954 if (fcnt != num_add) {
1955 *promisc_changed = true;
1956 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
1957 dev_warn(&vsi->back->pdev->dev,
1958 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
1959 i40e_aq_str(hw, aq_err),
1960 vsi_name);
1961 }
1962 }
1963
1964 /**
1965 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
1966  * @vsi: pointer to the VSI
 * @vsi_name: the VSI name, used in messages
1967 * @f: filter data
1968 *
1969 * This function sets or clears the promiscuous broadcast flags for VLAN
1970 * filters in order to properly receive broadcast frames. Assumes that only
1971 * broadcast filters are passed.
1972 *
1973  * Returns status indicating success or failure.
1974 **/
1975 static i40e_status
1976 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
1977 struct i40e_mac_filter *f)
1978 {
1979 bool enable = f->state == I40E_FILTER_NEW;
1980 struct i40e_hw *hw = &vsi->back->hw;
1981 i40e_status aq_ret;
1982
1983 if (f->vlan == I40E_VLAN_ANY) {
1984 aq_ret = i40e_aq_set_vsi_broadcast(hw,
1985 vsi->seid,
1986 enable,
1987 NULL);
1988 } else {
1989 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
1990 vsi->seid,
1991 enable,
1992 f->vlan,
1993 NULL);
1994 }
1995
1996 if (aq_ret)
1997 dev_warn(&vsi->back->pdev->dev,
1998 "Error %s setting broadcast promiscuous mode on %s\n",
1999 i40e_aq_str(hw, hw->aq.asq_last_status),
2000 vsi_name);
2001
2002 return aq_ret;
2003 }
2004
2005 /**
2006 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2007 * @vsi: ptr to the VSI
2008 *
2009 * Push any outstanding VSI filter changes through the AdminQ.
2010 *
2011 * Returns 0 or error value
2012 **/
2013 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2014 {
2015 struct hlist_head tmp_add_list, tmp_del_list;
2016 struct i40e_mac_filter *f;
2017 struct i40e_new_mac_filter *new, *add_head = NULL;
2018 struct i40e_hw *hw = &vsi->back->hw;
2019 unsigned int failed_filters = 0;
2020 unsigned int vlan_filters = 0;
2021 bool promisc_changed = false;
2022 char vsi_name[16] = "PF";
2023 int filter_list_len = 0;
2024 i40e_status aq_ret = 0;
2025 u32 changed_flags = 0;
2026 struct hlist_node *h;
2027 struct i40e_pf *pf;
2028 int num_add = 0;
2029 int num_del = 0;
2030 int retval = 0;
2031 u16 cmd_flags;
2032 int list_size;
2033 int bkt;
2034
2035 	/* array-typed pointers for the AQ element buffers, allocated later */
2036 struct i40e_aqc_add_macvlan_element_data *add_list;
2037 struct i40e_aqc_remove_macvlan_element_data *del_list;
2038
2039 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2040 usleep_range(1000, 2000);
2041 pf = vsi->back;
2042
2043 if (vsi->netdev) {
2044 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2045 vsi->current_netdev_flags = vsi->netdev->flags;
2046 }
2047
2048 INIT_HLIST_HEAD(&tmp_add_list);
2049 INIT_HLIST_HEAD(&tmp_del_list);
2050
2051 if (vsi->type == I40E_VSI_SRIOV)
2052 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2053 else if (vsi->type != I40E_VSI_MAIN)
2054 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2055
2056 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2057 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2058
2059 spin_lock_bh(&vsi->mac_filter_hash_lock);
2060 /* Create a list of filters to delete. */
2061 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2062 if (f->state == I40E_FILTER_REMOVE) {
2063 /* Move the element into temporary del_list */
2064 hash_del(&f->hlist);
2065 hlist_add_head(&f->hlist, &tmp_del_list);
2066
2067 /* Avoid counting removed filters */
2068 continue;
2069 }
2070 if (f->state == I40E_FILTER_NEW) {
2071 /* Create a temporary i40e_new_mac_filter */
2072 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2073 if (!new)
2074 goto err_no_memory_locked;
2075
2076 /* Store pointer to the real filter */
2077 new->f = f;
2078 new->state = f->state;
2079
2080 /* Add it to the hash list */
2081 hlist_add_head(&new->hlist, &tmp_add_list);
2082 }
2083
2084 /* Count the number of active (current and new) VLAN
2085 * filters we have now. Does not count filters which
2086 * are marked for deletion.
2087 */
2088 if (f->vlan > 0)
2089 vlan_filters++;
2090 }
2091
2092 retval = i40e_correct_mac_vlan_filters(vsi,
2093 &tmp_add_list,
2094 &tmp_del_list,
2095 vlan_filters);
2096 if (retval)
2097 goto err_no_memory_locked;
2098
2099 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2100 }
2101
2102 /* Now process 'del_list' outside the lock */
2103 if (!hlist_empty(&tmp_del_list)) {
2104 filter_list_len = hw->aq.asq_buf_size /
2105 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2106 list_size = filter_list_len *
2107 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2108 del_list = kzalloc(list_size, GFP_ATOMIC);
2109 if (!del_list)
2110 goto err_no_memory;
2111
2112 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2113 cmd_flags = 0;
2114
2115 /* handle broadcast filters by updating the broadcast
2116 			 * promiscuous flag, then release the filter entry.
2117 */
2118 if (is_broadcast_ether_addr(f->macaddr)) {
2119 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2120
2121 hlist_del(&f->hlist);
2122 kfree(f);
2123 continue;
2124 }
2125
2126 /* add to delete list */
2127 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2128 if (f->vlan == I40E_VLAN_ANY) {
2129 del_list[num_del].vlan_tag = 0;
2130 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2131 } else {
2132 del_list[num_del].vlan_tag =
2133 cpu_to_le16((u16)(f->vlan));
2134 }
2135
2136 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2137 del_list[num_del].flags = cmd_flags;
2138 num_del++;
2139
2140 /* flush a full buffer */
2141 if (num_del == filter_list_len) {
2142 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2143 num_del, &retval);
2144 memset(del_list, 0, list_size);
2145 num_del = 0;
2146 }
2147 /* Release memory for MAC filter entries which were
2148 * synced up with HW.
2149 */
2150 hlist_del(&f->hlist);
2151 kfree(f);
2152 }
2153
2154 if (num_del) {
2155 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2156 num_del, &retval);
2157 }
2158
2159 kfree(del_list);
2160 del_list = NULL;
2161 }
2162
2163 if (!hlist_empty(&tmp_add_list)) {
2164 /* Do all the adds now. */
2165 filter_list_len = hw->aq.asq_buf_size /
2166 sizeof(struct i40e_aqc_add_macvlan_element_data);
2167 list_size = filter_list_len *
2168 sizeof(struct i40e_aqc_add_macvlan_element_data);
2169 add_list = kzalloc(list_size, GFP_ATOMIC);
2170 if (!add_list)
2171 goto err_no_memory;
2172
2173 num_add = 0;
2174 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2175 if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
2176 vsi->state)) {
2177 new->state = I40E_FILTER_FAILED;
2178 continue;
2179 }
2180
2181 /* handle broadcast filters by updating the broadcast
2182 * promiscuous flag instead of adding a MAC filter.
2183 */
2184 if (is_broadcast_ether_addr(new->f->macaddr)) {
2185 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2186 new->f))
2187 new->state = I40E_FILTER_FAILED;
2188 else
2189 new->state = I40E_FILTER_ACTIVE;
2190 continue;
2191 }
2192
2193 /* add to add array */
2194 if (num_add == 0)
2195 add_head = new;
2196 cmd_flags = 0;
2197 ether_addr_copy(add_list[num_add].mac_addr,
2198 new->f->macaddr);
2199 if (new->f->vlan == I40E_VLAN_ANY) {
2200 add_list[num_add].vlan_tag = 0;
2201 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2202 } else {
2203 add_list[num_add].vlan_tag =
2204 cpu_to_le16((u16)(new->f->vlan));
2205 }
2206 add_list[num_add].queue_number = 0;
2207 /* set invalid match method for later detection */
2208 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2209 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2210 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2211 num_add++;
2212
2213 /* flush a full buffer */
2214 if (num_add == filter_list_len) {
2215 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2216 add_head, num_add,
2217 &promisc_changed);
2218 memset(add_list, 0, list_size);
2219 num_add = 0;
2220 }
2221 }
2222 if (num_add) {
2223 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2224 num_add, &promisc_changed);
2225 }
2226 /* Now move all of the filters from the temp add list back to
2227 * the VSI's list.
2228 */
2229 spin_lock_bh(&vsi->mac_filter_hash_lock);
2230 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2231 /* Only update the state if we're still NEW */
2232 if (new->f->state == I40E_FILTER_NEW)
2233 new->f->state = new->state;
2234 hlist_del(&new->hlist);
2235 kfree(new);
2236 }
2237 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2238 kfree(add_list);
2239 add_list = NULL;
2240 }
2241
2242 /* Determine the number of active and failed filters. */
2243 spin_lock_bh(&vsi->mac_filter_hash_lock);
2244 vsi->active_filters = 0;
2245 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2246 if (f->state == I40E_FILTER_ACTIVE)
2247 vsi->active_filters++;
2248 else if (f->state == I40E_FILTER_FAILED)
2249 failed_filters++;
2250 }
2251 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2252
2253 /* If promiscuous mode has changed, we need to calculate a new
2254 * threshold for when we are safe to exit
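	 * (three quarters of the active filter count; e.g. 40 active
	 * filters give a threshold of 30)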
2255 */
2256 if (promisc_changed)
2257 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2258
2259 /* Check if we are able to exit overflow promiscuous mode. We can
2260 * safely exit if we didn't just enter, we no longer have any failed
2261 * filters, and we have reduced filters below the threshold value.
2262 */
2263 if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
2264 !promisc_changed && !failed_filters &&
2265 (vsi->active_filters < vsi->promisc_threshold)) {
2266 dev_info(&pf->pdev->dev,
2267 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2268 vsi_name);
2269 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2270 promisc_changed = true;
2271 vsi->promisc_threshold = 0;
2272 }
2273
2274 /* if the VF is not trusted do not do promisc */
2275 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2276 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2277 goto out;
2278 }
2279
2280 /* check for changes in promiscuous modes */
2281 if (changed_flags & IFF_ALLMULTI) {
2282 bool cur_multipromisc;
2283
2284 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2285 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2286 vsi->seid,
2287 cur_multipromisc,
2288 NULL);
2289 if (aq_ret) {
2290 retval = i40e_aq_rc_to_posix(aq_ret,
2291 hw->aq.asq_last_status);
2292 dev_info(&pf->pdev->dev,
2293 "set multi promisc failed on %s, err %s aq_err %s\n",
2294 vsi_name,
2295 i40e_stat_str(hw, aq_ret),
2296 i40e_aq_str(hw, hw->aq.asq_last_status));
2297 }
2298 }
2299
2300 if ((changed_flags & IFF_PROMISC) || promisc_changed) {
2301 bool cur_promisc;
2302
2303 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2304 test_bit(__I40E_VSI_OVERFLOW_PROMISC,
2305 vsi->state));
2306 if ((vsi->type == I40E_VSI_MAIN) &&
2307 (pf->lan_veb != I40E_NO_VEB) &&
2308 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2309 			/* set defport ON for Main VSI instead of true promisc;
2310 * this way we will get all unicast/multicast and VLAN
2311 * promisc behavior but will not get VF or VMDq traffic
2312 * replicated on the Main VSI.
2313 */
2314 if (pf->cur_promisc != cur_promisc) {
2315 pf->cur_promisc = cur_promisc;
2316 if (cur_promisc)
2317 aq_ret =
2318 i40e_aq_set_default_vsi(hw,
2319 vsi->seid,
2320 NULL);
2321 else
2322 aq_ret =
2323 i40e_aq_clear_default_vsi(hw,
2324 vsi->seid,
2325 NULL);
2326 if (aq_ret) {
2327 retval = i40e_aq_rc_to_posix(aq_ret,
2328 hw->aq.asq_last_status);
2329 dev_info(&pf->pdev->dev,
2330 "Set default VSI failed on %s, err %s, aq_err %s\n",
2331 vsi_name,
2332 i40e_stat_str(hw, aq_ret),
2333 i40e_aq_str(hw,
2334 hw->aq.asq_last_status));
2335 }
2336 }
2337 } else {
2338 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2339 hw,
2340 vsi->seid,
2341 cur_promisc, NULL,
2342 true);
2343 if (aq_ret) {
2344 retval =
2345 i40e_aq_rc_to_posix(aq_ret,
2346 hw->aq.asq_last_status);
2347 dev_info(&pf->pdev->dev,
2348 "set unicast promisc failed on %s, err %s, aq_err %s\n",
2349 vsi_name,
2350 i40e_stat_str(hw, aq_ret),
2351 i40e_aq_str(hw,
2352 hw->aq.asq_last_status));
2353 }
2354 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2355 hw,
2356 vsi->seid,
2357 cur_promisc, NULL);
2358 if (aq_ret) {
2359 retval =
2360 i40e_aq_rc_to_posix(aq_ret,
2361 hw->aq.asq_last_status);
2362 dev_info(&pf->pdev->dev,
2363 "set multicast promisc failed on %s, err %s, aq_err %s\n",
2364 vsi_name,
2365 i40e_stat_str(hw, aq_ret),
2366 i40e_aq_str(hw,
2367 hw->aq.asq_last_status));
2368 }
2369 }
2370 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
2371 vsi->seid,
2372 cur_promisc, NULL);
2373 if (aq_ret) {
2374 retval = i40e_aq_rc_to_posix(aq_ret,
2375 pf->hw.aq.asq_last_status);
2376 dev_info(&pf->pdev->dev,
2377 "set brdcast promisc failed, err %s, aq_err %s\n",
2378 i40e_stat_str(hw, aq_ret),
2379 i40e_aq_str(hw,
2380 hw->aq.asq_last_status));
2381 }
2382 }
2383 out:
2384 /* if something went wrong then set the changed flag so we try again */
2385 if (retval)
2386 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2387
2388 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2389 return retval;
2390
2391 err_no_memory:
2392 /* Restore elements on the temporary add and delete lists */
2393 spin_lock_bh(&vsi->mac_filter_hash_lock);
2394 err_no_memory_locked:
2395 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2396 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2397 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2398
2399 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2400 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2401 return -ENOMEM;
2402 }
2403
2404 /**
2405 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2406 * @pf: board private structure
2407 **/
2408 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2409 {
2410 int v;
2411
2412 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2413 return;
2414 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2415
2416 for (v = 0; v < pf->num_alloc_vsi; v++) {
2417 if (pf->vsi[v] &&
2418 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2419 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2420
2421 if (ret) {
2422 /* come back and try again later */
2423 pf->flags |= I40E_FLAG_FILTER_SYNC;
2424 break;
2425 }
2426 }
2427 }
2428 }
2429
2430 /**
2431 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2432 * @vsi: the vsi
2433 **/
2434 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2435 {
2436 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2437 return I40E_RXBUFFER_2048;
2438 else
2439 return I40E_RXBUFFER_3072;
2440 }
2441
2442 /**
2443  * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
2444 * @netdev: network interface device structure
2445 * @new_mtu: new value for maximum frame size
2446 *
2447 * Returns 0 on success, negative on failure
2448 **/
2449 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2450 {
2451 struct i40e_netdev_priv *np = netdev_priv(netdev);
2452 struct i40e_vsi *vsi = np->vsi;
2453 struct i40e_pf *pf = vsi->back;
2454
2455 if (i40e_enabled_xdp_vsi(vsi)) {
2456 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
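		/* e.g. an MTU of 1500 gives 1500 + 14 (ETH_HLEN) +
		 * 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes, which fits
		 * even the smallest (2048-byte) XDP buffer
		 */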
2457
2458 if (frame_size > i40e_max_xdp_frame_size(vsi))
2459 return -EINVAL;
2460 }
2461
2462 netdev_info(netdev, "changing MTU from %d to %d\n",
2463 netdev->mtu, new_mtu);
2464 netdev->mtu = new_mtu;
2465 if (netif_running(netdev))
2466 i40e_vsi_reinit_locked(vsi);
2467 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
2468 I40E_FLAG_CLIENT_L2_CHANGE);
2469 return 0;
2470 }
2471
2472 /**
2473 * i40e_ioctl - Access the hwtstamp interface
2474 * @netdev: network interface device structure
2475 * @ifr: interface request data
2476 * @cmd: ioctl command
2477 **/
2478 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2479 {
2480 struct i40e_netdev_priv *np = netdev_priv(netdev);
2481 struct i40e_pf *pf = np->vsi->back;
2482
2483 switch (cmd) {
2484 case SIOCGHWTSTAMP:
2485 return i40e_ptp_get_ts_config(pf, ifr);
2486 case SIOCSHWTSTAMP:
2487 return i40e_ptp_set_ts_config(pf, ifr);
2488 default:
2489 return -EOPNOTSUPP;
2490 }
2491 }
2492
2493 /**
2494 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2495 * @vsi: the vsi being adjusted
2496 **/
2497 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2498 {
2499 struct i40e_vsi_context ctxt;
2500 i40e_status ret;
2501
2502 if ((vsi->info.valid_sections &
2503 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2504 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2505 return; /* already enabled */
2506
2507 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2508 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2509 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2510
2511 ctxt.seid = vsi->seid;
2512 ctxt.info = vsi->info;
2513 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2514 if (ret) {
2515 dev_info(&vsi->back->pdev->dev,
2516 "update vlan stripping failed, err %s aq_err %s\n",
2517 i40e_stat_str(&vsi->back->hw, ret),
2518 i40e_aq_str(&vsi->back->hw,
2519 vsi->back->hw.aq.asq_last_status));
2520 }
2521 }
2522
2523 /**
2524 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2525 * @vsi: the vsi being adjusted
2526 **/
2527 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2528 {
2529 struct i40e_vsi_context ctxt;
2530 i40e_status ret;
2531
2532 if ((vsi->info.valid_sections &
2533 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2534 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2535 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2536 return; /* already disabled */
2537
2538 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2539 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2540 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2541
2542 ctxt.seid = vsi->seid;
2543 ctxt.info = vsi->info;
2544 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2545 if (ret) {
2546 dev_info(&vsi->back->pdev->dev,
2547 "update vlan stripping failed, err %s aq_err %s\n",
2548 i40e_stat_str(&vsi->back->hw, ret),
2549 i40e_aq_str(&vsi->back->hw,
2550 vsi->back->hw.aq.asq_last_status));
2551 }
2552 }
2553
2554 /**
2555 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2556 * @netdev: network interface to be adjusted
2557 * @features: netdev features to test if VLAN offload is enabled or not
2558 **/
2559 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2560 {
2561 struct i40e_netdev_priv *np = netdev_priv(netdev);
2562 struct i40e_vsi *vsi = np->vsi;
2563
2564 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2565 i40e_vlan_stripping_enable(vsi);
2566 else
2567 i40e_vlan_stripping_disable(vsi);
2568 }
2569
2570 /**
2571 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2572 * @vsi: the vsi being configured
2573  * @vid: vlan id to be added (0 = untagged only, -1 = any)
2574 *
2575 * This is a helper function for adding a new MAC/VLAN filter with the
2576 * specified VLAN for each existing MAC address already in the hash table.
2577 * This function does *not* perform any accounting to update filters based on
2578 * VLAN mode.
2579 *
2580 * NOTE: this function expects to be called while under the
2581 * mac_filter_hash_lock
2582 **/
2583 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2584 {
2585 struct i40e_mac_filter *f, *add_f;
2586 struct hlist_node *h;
2587 int bkt;
2588
2589 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2590 if (f->state == I40E_FILTER_REMOVE)
2591 continue;
2592 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2593 if (!add_f) {
2594 dev_info(&vsi->back->pdev->dev,
2595 "Could not add vlan filter %d for %pM\n",
2596 vid, f->macaddr);
2597 return -ENOMEM;
2598 }
2599 }
2600
2601 return 0;
2602 }
2603
2604 /**
2605 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2606 * @vsi: the VSI being configured
2607 * @vid: VLAN id to be added
2608 **/
2609 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2610 {
2611 int err;
2612
2613 if (vsi->info.pvid)
2614 return -EINVAL;
2615
2616 /* The network stack will attempt to add VID=0, with the intention to
2617 * receive priority tagged packets with a VLAN of 0. Our HW receives
2618 * these packets by default when configured to receive untagged
2619 * packets, so we don't need to add a filter for this case.
2620 * Additionally, HW interprets adding a VID=0 filter as meaning to
2621 * receive *only* tagged traffic and stops receiving untagged traffic.
2622 * Thus, we do not want to actually add a filter for VID=0
2623 */
2624 if (!vid)
2625 return 0;
2626
2627 	/* Locked once because all functions invoked below iterate the list */
2628 spin_lock_bh(&vsi->mac_filter_hash_lock);
2629 err = i40e_add_vlan_all_mac(vsi, vid);
2630 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2631 if (err)
2632 return err;
2633
2634 /* schedule our worker thread which will take care of
2635 * applying the new filter changes
2636 */
2637 i40e_service_event_schedule(vsi->back);
2638 return 0;
2639 }
2640
2641 /**
2642 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2643 * @vsi: the vsi being configured
2644  * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2645 *
2646 * This function should be used to remove all VLAN filters which match the
2647 * given VID. It does not schedule the service event and does not take the
2648 * mac_filter_hash_lock so it may be combined with other operations under
2649 * a single invocation of the mac_filter_hash_lock.
2650 *
2651 * NOTE: this function expects to be called while under the
2652 * mac_filter_hash_lock
2653 */
2654 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2655 {
2656 struct i40e_mac_filter *f;
2657 struct hlist_node *h;
2658 int bkt;
2659
2660 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2661 if (f->vlan == vid)
2662 __i40e_del_filter(vsi, f);
2663 }
2664 }
2665
2666 /**
2667 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2668 * @vsi: the VSI being configured
2669 * @vid: VLAN id to be removed
2670 **/
2671 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2672 {
2673 if (!vid || vsi->info.pvid)
2674 return;
2675
2676 spin_lock_bh(&vsi->mac_filter_hash_lock);
2677 i40e_rm_vlan_all_mac(vsi, vid);
2678 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2679
2680 /* schedule our worker thread which will take care of
2681 * applying the new filter changes
2682 */
2683 i40e_service_event_schedule(vsi->back);
2684 }
2685
2686 /**
2687 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2688 * @netdev: network interface to be adjusted
2689 * @vid: vlan id to be added
2690 *
2691 * net_device_ops implementation for adding vlan ids
2692 **/
2693 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2694 __always_unused __be16 proto, u16 vid)
2695 {
2696 struct i40e_netdev_priv *np = netdev_priv(netdev);
2697 struct i40e_vsi *vsi = np->vsi;
2698 int ret = 0;
2699
2700 if (vid >= VLAN_N_VID)
2701 return -EINVAL;
2702
2703 ret = i40e_vsi_add_vlan(vsi, vid);
2704 if (!ret)
2705 set_bit(vid, vsi->active_vlans);
2706
2707 return ret;
2708 }
2709
2710 /**
2711 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2712 * @netdev: network interface to be adjusted
2713 * @vid: vlan id to be removed
2714 *
2715 * net_device_ops implementation for removing vlan ids
2716 **/
2717 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2718 __always_unused __be16 proto, u16 vid)
2719 {
2720 struct i40e_netdev_priv *np = netdev_priv(netdev);
2721 struct i40e_vsi *vsi = np->vsi;
2722
2723 /* return code is ignored as there is nothing a user
2724 * can do about failure to remove and a log message was
2725 	 * already printed by the called function
2726 */
2727 i40e_vsi_kill_vlan(vsi, vid);
2728
2729 clear_bit(vid, vsi->active_vlans);
2730
2731 return 0;
2732 }
2733
2734 /**
2735 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2736 * @vsi: the vsi being brought back up
2737 **/
2738 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2739 {
2740 u16 vid;
2741
2742 if (!vsi->netdev)
2743 return;
2744
2745 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2746
2747 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2748 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2749 vid);
2750 }
2751
2752 /**
2753 * i40e_vsi_add_pvid - Add pvid for the VSI
2754 * @vsi: the vsi being adjusted
2755 * @vid: the vlan id to set as a PVID
2756 **/
2757 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2758 {
2759 struct i40e_vsi_context ctxt;
2760 i40e_status ret;
2761
2762 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2763 vsi->info.pvid = cpu_to_le16(vid);
2764 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2765 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2766 I40E_AQ_VSI_PVLAN_EMOD_STR;
2767
2768 ctxt.seid = vsi->seid;
2769 ctxt.info = vsi->info;
2770 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2771 if (ret) {
2772 dev_info(&vsi->back->pdev->dev,
2773 "add pvid failed, err %s aq_err %s\n",
2774 i40e_stat_str(&vsi->back->hw, ret),
2775 i40e_aq_str(&vsi->back->hw,
2776 vsi->back->hw.aq.asq_last_status));
2777 return -ENOENT;
2778 }
2779
2780 return 0;
2781 }
2782
2783 /**
2784 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2785 * @vsi: the vsi being adjusted
2786 *
2787  * Just disable VLAN stripping and clear the stored PVID to put it back to normal
2788 **/
2789 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2790 {
2791 i40e_vlan_stripping_disable(vsi);
2792
2793 vsi->info.pvid = 0;
2794 }
2795
2796 /**
2797 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2798 * @vsi: ptr to the VSI
2799 *
2800 * If this function returns with an error, then it's possible one or
2801 * more of the rings is populated (while the rest are not). It is the
2802  * caller's duty to clean those orphaned rings.
2803 *
2804 * Return 0 on success, negative on failure
2805 **/
2806 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2807 {
2808 int i, err = 0;
2809
2810 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2811 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2812
2813 if (!i40e_enabled_xdp_vsi(vsi))
2814 return err;
2815
2816 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2817 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
2818
2819 return err;
2820 }
2821
2822 /**
2823 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2824 * @vsi: ptr to the VSI
2825 *
2826 * Free VSI's transmit software resources
2827 **/
2828 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2829 {
2830 int i;
2831
2832 if (vsi->tx_rings) {
2833 for (i = 0; i < vsi->num_queue_pairs; i++)
2834 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2835 i40e_free_tx_resources(vsi->tx_rings[i]);
2836 }
2837
2838 if (vsi->xdp_rings) {
2839 for (i = 0; i < vsi->num_queue_pairs; i++)
2840 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2841 i40e_free_tx_resources(vsi->xdp_rings[i]);
2842 }
2843 }
2844
2845 /**
2846 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2847 * @vsi: ptr to the VSI
2848 *
2849 * If this function returns with an error, then it's possible one or
2850 * more of the rings is populated (while the rest are not). It is the
2851  * caller's duty to clean those orphaned rings.
2852 *
2853 * Return 0 on success, negative on failure
2854 **/
2855 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2856 {
2857 int i, err = 0;
2858
2859 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2860 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2861 return err;
2862 }
2863
2864 /**
2865 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2866 * @vsi: ptr to the VSI
2867 *
2868 * Free all receive software resources
2869 **/
2870 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2871 {
2872 int i;
2873
2874 if (!vsi->rx_rings)
2875 return;
2876
2877 for (i = 0; i < vsi->num_queue_pairs; i++)
2878 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2879 i40e_free_rx_resources(vsi->rx_rings[i]);
2880 }
2881
2882 /**
2883 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2884 * @ring: The Tx ring to configure
2885 *
2886 * This enables/disables XPS for a given Tx descriptor ring
2887 * based on the TCs enabled for the VSI that ring belongs to.
2888 **/
2889 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2890 {
2891 struct i40e_vsi *vsi = ring->vsi;
2892 int cpu;
2893
2894 if (!ring->q_vector || !ring->netdev)
2895 return;
2896
2897 if ((vsi->tc_config.numtc <= 1) &&
2898 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
2899 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
2900 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
2901 ring->queue_index);
2902 }
2908 }
2909
2910 /**
2911  * i40e_configure_tx_ring - Configure a transmit ring context
2912 * @ring: The Tx ring to configure
2913 *
2914 * Configure the Tx descriptor ring in the HMC context.
2915 **/
2916 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2917 {
2918 struct i40e_vsi *vsi = ring->vsi;
2919 u16 pf_q = vsi->base_queue + ring->queue_index;
2920 struct i40e_hw *hw = &vsi->back->hw;
2921 struct i40e_hmc_obj_txq tx_ctx;
2922 i40e_status err = 0;
2923 u32 qtx_ctl = 0;
2924
2925 /* some ATR related tx ring init */
2926 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2927 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2928 ring->atr_count = 0;
2929 } else {
2930 ring->atr_sample_rate = 0;
2931 }
2932
2933 /* configure XPS */
2934 i40e_config_xps_tx_ring(ring);
2935
2936 /* clear the context structure first */
2937 memset(&tx_ctx, 0, sizeof(tx_ctx));
2938
2939 tx_ctx.new_context = 1;
2940 tx_ctx.base = (ring->dma / 128);
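	/* the ring base address is programmed in 128-byte units */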
2941 tx_ctx.qlen = ring->count;
2942 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2943 I40E_FLAG_FD_ATR_ENABLED));
2944 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2945 /* FDIR VSI tx ring can still use RS bit and writebacks */
2946 if (vsi->type != I40E_VSI_FDIR)
2947 tx_ctx.head_wb_ena = 1;
2948 tx_ctx.head_wb_addr = ring->dma +
2949 (ring->count * sizeof(struct i40e_tx_desc));
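	/* the head writeback slot sits immediately after the last descriptor,
	 * e.g. a 512-entry ring of 16-byte descriptors puts it at
	 * ring->dma + 8192
	 */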
2950
2951 /* As part of VSI creation/update, FW allocates certain
2952 * Tx arbitration queue sets for each TC enabled for
2953 * the VSI. The FW returns the handles to these queue
2954 * sets as part of the response buffer to Add VSI,
2955 * Update VSI, etc. AQ commands. It is expected that
2956 * these queue set handles be associated with the Tx
2957 * queues by the driver as part of the TX queue context
2958 * initialization. This has to be done regardless of
2959 * DCB as by default everything is mapped to TC0.
2960 */
2961 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2962 tx_ctx.rdylist_act = 0;
2963
2964 /* clear the context in the HMC */
2965 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2966 if (err) {
2967 dev_info(&vsi->back->pdev->dev,
2968 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2969 ring->queue_index, pf_q, err);
2970 return -ENOMEM;
2971 }
2972
2973 /* set the context in the HMC */
2974 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2975 if (err) {
2976 dev_info(&vsi->back->pdev->dev,
2977 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2978 ring->queue_index, pf_q, err);
2979 return -ENOMEM;
2980 }
2981
2982 /* Now associate this queue with this PCI function */
2983 if (vsi->type == I40E_VSI_VMDQ2) {
2984 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2985 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2986 I40E_QTX_CTL_VFVM_INDX_MASK;
2987 } else {
2988 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2989 }
2990
2991 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2992 I40E_QTX_CTL_PF_INDX_MASK);
2993 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2994 i40e_flush(hw);
2995
2996 /* cache tail off for easier writes later */
2997 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2998
2999 return 0;
3000 }
3001
3002 /**
3003 * i40e_configure_rx_ring - Configure a receive ring context
3004 * @ring: The Rx ring to configure
3005 *
3006 * Configure the Rx descriptor ring in the HMC context.
3007 **/
3008 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3009 {
3010 struct i40e_vsi *vsi = ring->vsi;
3011 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3012 u16 pf_q = vsi->base_queue + ring->queue_index;
3013 struct i40e_hw *hw = &vsi->back->hw;
3014 struct i40e_hmc_obj_rxq rx_ctx;
3015 i40e_status err = 0;
3016
3017 ring->state = 0;
3018
3019 /* clear the context structure first */
3020 memset(&rx_ctx, 0, sizeof(rx_ctx));
3021
3022 ring->rx_buf_len = vsi->rx_buf_len;
3023
3024 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3025 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
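	/* dbuff is expressed in 128-byte units, e.g. a 2048-byte Rx buffer
	 * gives dbuff = 16
	 */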
3026
3027 rx_ctx.base = (ring->dma / 128);
3028 rx_ctx.qlen = ring->count;
3029
3030 /* use 32 byte descriptors */
3031 rx_ctx.dsize = 1;
3032
3033 /* descriptor type is always zero
3034 * rx_ctx.dtype = 0;
3035 */
3036 rx_ctx.hsplit_0 = 0;
3037
3038 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3039 if (hw->revision_id == 0)
3040 rx_ctx.lrxqthresh = 0;
3041 else
3042 rx_ctx.lrxqthresh = 2;
3043 rx_ctx.crcstrip = 1;
3044 rx_ctx.l2tsel = 1;
3045 /* this controls whether VLAN is stripped from inner headers */
3046 rx_ctx.showiv = 0;
3047 /* set the prefena field to 1 because the manual says to */
3048 rx_ctx.prefena = 1;
3049
3050 /* clear the context in the HMC */
3051 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3052 if (err) {
3053 dev_info(&vsi->back->pdev->dev,
3054 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3055 ring->queue_index, pf_q, err);
3056 return -ENOMEM;
3057 }
3058
3059 /* set the context in the HMC */
3060 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3061 if (err) {
3062 dev_info(&vsi->back->pdev->dev,
3063 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3064 ring->queue_index, pf_q, err);
3065 return -ENOMEM;
3066 }
3067
3068 /* configure Rx buffer alignment */
3069 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3070 clear_ring_build_skb_enabled(ring);
3071 else
3072 set_ring_build_skb_enabled(ring);
3073
3074 /* cache tail for quicker writes, and clear the reg before use */
3075 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3076 writel(0, ring->tail);
3077
3078 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3079
3080 return 0;
3081 }
3082
3083 /**
3084 * i40e_vsi_configure_tx - Configure the VSI for Tx
3085 * @vsi: VSI structure describing this set of rings and resources
3086 *
3087 * Configure the Tx VSI for operation.
3088 **/
3089 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3090 {
3091 int err = 0;
3092 u16 i;
3093
3094 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3095 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3096
3097 if (!i40e_enabled_xdp_vsi(vsi))
3098 return err;
3099
3100 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3101 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3102
3103 return err;
3104 }
3105
3106 /**
3107 * i40e_vsi_configure_rx - Configure the VSI for Rx
3108 * @vsi: the VSI being configured
3109 *
3110 * Configure the Rx VSI for operation.
3111 **/
3112 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3113 {
3114 int err = 0;
3115 u16 i;
3116
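	/* Choose the Rx buffer size: legacy-rx and netdev-less VSIs always
	 * use 2K buffers; on 4K-page systems a default-MTU netdev can use
	 * 1536-byte buffers, while larger MTUs get 3K buffers (2K on bigger
	 * page sizes).
	 */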
3117 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3118 vsi->max_frame = I40E_MAX_RXBUFFER;
3119 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3120 #if (PAGE_SIZE < 8192)
3121 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3122 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3123 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3124 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3125 #endif
3126 } else {
3127 vsi->max_frame = I40E_MAX_RXBUFFER;
3128 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3129 I40E_RXBUFFER_2048;
3130 }
3131
3132 /* set up individual rings */
3133 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3134 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3135
3136 return err;
3137 }
3138
3139 /**
3140 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3141 * @vsi: ptr to the VSI
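 *
 * e.g. with TC0 = {qoffset 0, qcount 4} and TC1 = {qoffset 4, qcount 4},
 * rings 0-3 are tagged with dcb_tc 0 and rings 4-7 with dcb_tc 1.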
3142 **/
3143 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3144 {
3145 struct i40e_ring *tx_ring, *rx_ring;
3146 u16 qoffset, qcount;
3147 int i, n;
3148
3149 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3150 /* Reset the TC information */
3151 for (i = 0; i < vsi->num_queue_pairs; i++) {
3152 rx_ring = vsi->rx_rings[i];
3153 tx_ring = vsi->tx_rings[i];
3154 rx_ring->dcb_tc = 0;
3155 tx_ring->dcb_tc = 0;
3156 }
3157 }
3158
3159 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3160 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3161 continue;
3162
3163 qoffset = vsi->tc_config.tc_info[n].qoffset;
3164 qcount = vsi->tc_config.tc_info[n].qcount;
3165 for (i = qoffset; i < (qoffset + qcount); i++) {
3166 rx_ring = vsi->rx_rings[i];
3167 tx_ring = vsi->tx_rings[i];
3168 rx_ring->dcb_tc = n;
3169 tx_ring->dcb_tc = n;
3170 }
3171 }
3172 }
3173
3174 /**
3175 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3176 * @vsi: ptr to the VSI
3177 **/
3178 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3179 {
3180 if (vsi->netdev)
3181 i40e_set_rx_mode(vsi->netdev);
3182 }
3183
3184 /**
3185 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3186 * @vsi: Pointer to the targeted VSI
3187 *
3188 * This function replays the hlist on the hw where all the SB Flow Director
3189 * filters were saved.
3190 **/
3191 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3192 {
3193 struct i40e_fdir_filter *filter;
3194 struct i40e_pf *pf = vsi->back;
3195 struct hlist_node *node;
3196
3197 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3198 return;
3199
3200 /* Reset FDir counters as we're replaying all existing filters */
3201 pf->fd_tcp4_filter_cnt = 0;
3202 pf->fd_udp4_filter_cnt = 0;
3203 pf->fd_sctp4_filter_cnt = 0;
3204 pf->fd_ip4_filter_cnt = 0;
3205
3206 hlist_for_each_entry_safe(filter, node,
3207 &pf->fdir_filter_list, fdir_node) {
3208 i40e_add_del_fdir(vsi, filter, true);
3209 }
3210 }
3211
3212 /**
3213 * i40e_vsi_configure - Set up the VSI for action
3214 * @vsi: the VSI being configured
3215 **/
3216 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3217 {
3218 int err;
3219
3220 i40e_set_vsi_rx_mode(vsi);
3221 i40e_restore_vlan(vsi);
3222 i40e_vsi_config_dcb_rings(vsi);
3223 err = i40e_vsi_configure_tx(vsi);
3224 if (!err)
3225 err = i40e_vsi_configure_rx(vsi);
3226
3227 return err;
3228 }
3229
3230 /**
3231 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3232 * @vsi: the VSI being configured
3233 **/
3234 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3235 {
3236 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3237 struct i40e_pf *pf = vsi->back;
3238 struct i40e_hw *hw = &pf->hw;
3239 u16 vector;
3240 int i, q;
3241 u32 qp;
3242
3243 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3244 * and PFINT_LNKLSTn registers, e.g.:
3245 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3246 */
3247 qp = vsi->base_queue;
3248 vector = vsi->base_vector;
3249 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3250 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3251
3252 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3253 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
3254 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3255 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3256 q_vector->rx.itr);
3257 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
3258 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3259 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3260 q_vector->tx.itr);
3261 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3262 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3263
3264 /* Linked list for the queuepairs assigned to this vector */
3265 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
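		/* Each vector's list then runs Rx(qp) -> [XDP Tx(qp +
		 * alloc_queue_pairs) ->] Tx(qp) -> Rx(qp + 1) -> ... and is
		 * terminated on the vector's last Tx queue.
		 */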
3266 for (q = 0; q < q_vector->num_ringpairs; q++) {
3267 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3268 u32 val;
3269
3270 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3271 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3272 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3273 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3274 (I40E_QUEUE_TYPE_TX <<
3275 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3276
3277 wr32(hw, I40E_QINT_RQCTL(qp), val);
3278
3279 if (has_xdp) {
3280 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3281 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3282 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3283 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3284 (I40E_QUEUE_TYPE_TX <<
3285 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3286
3287 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3288 }
3289
3290 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3291 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3292 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3293 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3294 (I40E_QUEUE_TYPE_RX <<
3295 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3296
3297 /* Terminate the linked list */
3298 if (q == (q_vector->num_ringpairs - 1))
3299 val |= (I40E_QUEUE_END_OF_LIST <<
3300 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3301
3302 wr32(hw, I40E_QINT_TQCTL(qp), val);
3303 qp++;
3304 }
3305 }
3306
3307 i40e_flush(hw);
3308 }
3309
3310 /**
3311 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3312  * @pf: board private structure
3313 **/
3314 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3315 {
3316 struct i40e_hw *hw = &pf->hw;
3317 u32 val;
3318
3319 /* clear things first */
3320 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3321 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3322
3323 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3324 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3325 I40E_PFINT_ICR0_ENA_GRST_MASK |
3326 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3327 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3328 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3329 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3330 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3331
3332 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3333 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3334
3335 if (pf->flags & I40E_FLAG_PTP)
3336 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3337
3338 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3339
3340 /* SW_ITR_IDX = 0, but don't change INTENA */
3341 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3342 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3343
3344 /* OTHER_ITR_IDX = 0 */
3345 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3346 }
3347
3348 /**
3349 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3350 * @vsi: the VSI being configured
3351 **/
3352 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3353 {
3354 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3355 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3356 struct i40e_pf *pf = vsi->back;
3357 struct i40e_hw *hw = &pf->hw;
3358 u32 val;
3359
3360 /* set the ITR configuration */
3361 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3362 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
3363 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3364 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
3365 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
3366 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3367 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
3368
3369 i40e_enable_misc_int_causes(pf);
3370
3371 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3372 wr32(hw, I40E_PFINT_LNKLST0, 0);
3373
3374 /* Associate the queue pair to the vector and enable the queue int */
3375 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3376 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3377 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3378 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3379
3380 wr32(hw, I40E_QINT_RQCTL(0), val);
3381
3382 if (i40e_enabled_xdp_vsi(vsi)) {
3383 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3384 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3385 (I40E_QUEUE_TYPE_TX
3386 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3387
3388 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3389 }
3390
3391 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3392 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3393 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3394
3395 wr32(hw, I40E_QINT_TQCTL(0), val);
3396 i40e_flush(hw);
3397 }
3398
3399 /**
3400 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3401 * @pf: board private structure
3402 **/
3403 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3404 {
3405 struct i40e_hw *hw = &pf->hw;
3406
3407 wr32(hw, I40E_PFINT_DYN_CTL0,
3408 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3409 i40e_flush(hw);
3410 }
3411
3412 /**
3413 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3414 * @pf: board private structure
3415 * @clearpba: true when all pending interrupt events should be cleared
3416 **/
3417 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
3418 {
3419 struct i40e_hw *hw = &pf->hw;
3420 u32 val;
3421
3422 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3423 (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
3424 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3425
3426 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3427 i40e_flush(hw);
3428 }
3429
3430 /**
3431 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3432 * @irq: interrupt number
3433 * @data: pointer to a q_vector
3434 **/
3435 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3436 {
3437 struct i40e_q_vector *q_vector = data;
3438
3439 if (!q_vector->tx.ring && !q_vector->rx.ring)
3440 return IRQ_HANDLED;
3441
3442 napi_schedule_irqoff(&q_vector->napi);
3443
3444 return IRQ_HANDLED;
3445 }
3446
3447 /**
3448 * i40e_irq_affinity_notify - Callback for affinity changes
3449 * @notify: context as to what irq was changed
3450 * @mask: the new affinity mask
3451 *
3452 * This is a callback function used by the irq_set_affinity_notifier function
3453 * so that we may register to receive changes to the irq affinity masks.
3454 **/
3455 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3456 const cpumask_t *mask)
3457 {
3458 struct i40e_q_vector *q_vector =
3459 container_of(notify, struct i40e_q_vector, affinity_notify);
3460
3461 cpumask_copy(&q_vector->affinity_mask, mask);
3462 }
3463
3464 /**
3465 * i40e_irq_affinity_release - Callback for affinity notifier release
3466 * @ref: internal core kernel usage
3467 *
3468 * This is a callback function used by the irq_set_affinity_notifier function
3469 * to inform the current notification subscriber that they will no longer
3470 * receive notifications.
3471 **/
3472 static void i40e_irq_affinity_release(struct kref *ref) {}
3473
3474 /**
3475 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3476 * @vsi: the VSI being configured
3477 * @basename: name for the vector
3478 *
3479 * Allocates MSI-X vectors and requests interrupts from the kernel.
3480 **/
3481 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3482 {
3483 int q_vectors = vsi->num_q_vectors;
3484 struct i40e_pf *pf = vsi->back;
3485 int base = vsi->base_vector;
3486 int rx_int_idx = 0;
3487 int tx_int_idx = 0;
3488 int vector, err;
3489 int irq_num;
3490 int cpu;
3491
3492 for (vector = 0; vector < q_vectors; vector++) {
3493 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3494
3495 irq_num = pf->msix_entries[base + vector].vector;
3496
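		/* name the vector after its rings, e.g. "<basename>-TxRx-0"
		 * for a combined Tx/Rx queue pair
		 */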
3497 if (q_vector->tx.ring && q_vector->rx.ring) {
3498 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3499 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3500 tx_int_idx++;
3501 } else if (q_vector->rx.ring) {
3502 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3503 "%s-%s-%d", basename, "rx", rx_int_idx++);
3504 } else if (q_vector->tx.ring) {
3505 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3506 "%s-%s-%d", basename, "tx", tx_int_idx++);
3507 } else {
3508 /* skip this unused q_vector */
3509 continue;
3510 }
3511 err = request_irq(irq_num,
3512 vsi->irq_handler,
3513 0,
3514 q_vector->name,
3515 q_vector);
3516 if (err) {
3517 dev_info(&pf->pdev->dev,
3518 "MSIX request_irq failed, error: %d\n", err);
3519 goto free_queue_irqs;
3520 }
3521
3522 /* register for affinity change notifications */
3523 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3524 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3525 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3526 /* Spread affinity hints out across online CPUs.
3527 *
3528 * get_cpu_mask returns a static constant mask with
3529 * a permanent lifetime so it's ok to pass to
3530 * irq_set_affinity_hint without making a copy.
3531 */
3532 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3533 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3534 }
3535
3536 vsi->irqs_ready = true;
3537 return 0;
3538
3539 free_queue_irqs:
3540 while (vector) {
3541 vector--;
3542 irq_num = pf->msix_entries[base + vector].vector;
3543 irq_set_affinity_notifier(irq_num, NULL);
3544 irq_set_affinity_hint(irq_num, NULL);
3545 		free_irq(irq_num, vsi->q_vectors[vector]);
3546 }
3547 return err;
3548 }
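
/* For illustration: with a hypothetical basename of "i40e-eth0", a
 * q_vector carrying both ring types requests its IRQ as
 * "i40e-eth0-TxRx-0", an Rx-only vector as "i40e-eth0-rx-N" and a
 * Tx-only vector as "i40e-eth0-tx-N"; these are the names that show
 * up in /proc/interrupts.
 */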
3549
3550 /**
3551 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3552 * @vsi: the VSI being un-configured
3553 **/
3554 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3555 {
3556 struct i40e_pf *pf = vsi->back;
3557 struct i40e_hw *hw = &pf->hw;
3558 int base = vsi->base_vector;
3559 int i;
3560
3561 /* disable interrupt causation from each queue */
3562 for (i = 0; i < vsi->num_queue_pairs; i++) {
3563 u32 val;
3564
3565 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3566 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3567 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3568
3569 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3570 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3571 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3572
3573 if (!i40e_enabled_xdp_vsi(vsi))
3574 continue;
3575 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3576 }
3577
3578 /* disable each interrupt */
3579 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3580 for (i = vsi->base_vector;
3581 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3582 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3583
3584 i40e_flush(hw);
3585 for (i = 0; i < vsi->num_q_vectors; i++)
3586 synchronize_irq(pf->msix_entries[i + base].vector);
3587 } else {
3588 /* Legacy and MSI mode - this stops all interrupt handling */
3589 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3590 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3591 i40e_flush(hw);
3592 synchronize_irq(pf->pdev->irq);
3593 }
3594 }
3595
3596 /**
3597 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3598 * @vsi: the VSI being configured
3599 **/
3600 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3601 {
3602 struct i40e_pf *pf = vsi->back;
3603 int i;
3604
3605 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3606 for (i = 0; i < vsi->num_q_vectors; i++)
3607 i40e_irq_dynamic_enable(vsi, i);
3608 } else {
3609 i40e_irq_dynamic_enable_icr0(pf, true);
3610 }
3611
3612 i40e_flush(&pf->hw);
3613 return 0;
3614 }
3615
3616 /**
3617 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3618 * @pf: board private structure
3619 **/
3620 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3621 {
3622 /* Disable ICR 0 */
3623 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3624 i40e_flush(&pf->hw);
3625 }
3626
3627 /**
3628 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3629 * @irq: interrupt number
3630 * @data: pointer to a PF
3631 *
3632 * This is the handler used for all MSI/Legacy interrupts, and deals
3633 * with both queue and non-queue interrupts. This is also used in
3634 * MSIX mode to handle the non-queue interrupts.
3635 **/
3636 static irqreturn_t i40e_intr(int irq, void *data)
3637 {
3638 struct i40e_pf *pf = (struct i40e_pf *)data;
3639 struct i40e_hw *hw = &pf->hw;
3640 irqreturn_t ret = IRQ_NONE;
3641 u32 icr0, icr0_remaining;
3642 u32 val, ena_mask;
3643
3644 icr0 = rd32(hw, I40E_PFINT_ICR0);
3645 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3646
3647 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3648 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3649 goto enable_intr;
3650
3651 /* if interrupt but no bits showing, must be SWINT */
3652 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3653 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3654 pf->sw_int_count++;
3655
3656 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3657 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3658 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3659 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3660 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3661 }
3662
3663 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3664 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3665 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3666 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3667
3668 		/* We do not have a way to disarm queue causes while leaving
3669 		 * the interrupt enabled for all other causes. Ideally the
3670 		 * interrupt would be disabled while we are in NAPI, but this
3671 		 * is not a performance path and napi_schedule() can deal
3672 		 * with rescheduling.
3673 		 */
3674 if (!test_bit(__I40E_DOWN, pf->state))
3675 napi_schedule_irqoff(&q_vector->napi);
3676 }
3677
3678 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3679 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3680 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3681 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3682 }
3683
3684 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3685 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3686 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3687 }
3688
3689 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3690 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3691 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3692 }
3693
3694 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3695 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3696 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3697 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3698 val = rd32(hw, I40E_GLGEN_RSTAT);
3699 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3700 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3701 if (val == I40E_RESET_CORER) {
3702 pf->corer_count++;
3703 } else if (val == I40E_RESET_GLOBR) {
3704 pf->globr_count++;
3705 } else if (val == I40E_RESET_EMPR) {
3706 pf->empr_count++;
3707 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
3708 }
3709 }
3710
3711 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3712 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3713 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3714 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3715 rd32(hw, I40E_PFHMC_ERRORINFO),
3716 rd32(hw, I40E_PFHMC_ERRORDATA));
3717 }
3718
3719 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3720 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3721
3722 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3723 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3724 i40e_ptp_tx_hwtstamp(pf);
3725 }
3726 }
3727
3728 /* If a critical error is pending we have no choice but to reset the
3729 * device.
3730 * Report and mask out any remaining unexpected interrupts.
3731 */
3732 icr0_remaining = icr0 & ena_mask;
3733 if (icr0_remaining) {
3734 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3735 icr0_remaining);
3736 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3737 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3738 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3739 dev_info(&pf->pdev->dev, "device will be reset\n");
3740 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
3741 i40e_service_event_schedule(pf);
3742 }
3743 ena_mask &= ~icr0_remaining;
3744 }
3745 ret = IRQ_HANDLED;
3746
3747 enable_intr:
3748 /* re-enable interrupt causes */
3749 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3750 if (!test_bit(__I40E_DOWN, pf->state)) {
3751 i40e_service_event_schedule(pf);
3752 i40e_irq_dynamic_enable_icr0(pf, false);
3753 }
3754
3755 return ret;
3756 }
3757
3758 /**
3759 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3760 * @tx_ring: tx ring to clean
3761 * @budget: how many cleans we're allowed
3762 *
3763 * Returns true if there's any budget left (i.e. the clean is finished)
3764 **/
3765 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3766 {
3767 struct i40e_vsi *vsi = tx_ring->vsi;
3768 u16 i = tx_ring->next_to_clean;
3769 struct i40e_tx_buffer *tx_buf;
3770 struct i40e_tx_desc *tx_desc;
3771
3772 tx_buf = &tx_ring->tx_bi[i];
3773 tx_desc = I40E_TX_DESC(tx_ring, i);
3774 i -= tx_ring->count;
3775
3776 do {
3777 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3778
3779 /* if next_to_watch is not set then there is no work pending */
3780 if (!eop_desc)
3781 break;
3782
3783 /* prevent any other reads prior to eop_desc */
3784 smp_rmb();
3785
3786 /* if the descriptor isn't done, no work yet to do */
3787 if (!(eop_desc->cmd_type_offset_bsz &
3788 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3789 break;
3790
3791 /* clear next_to_watch to prevent false hangs */
3792 tx_buf->next_to_watch = NULL;
3793
3794 tx_desc->buffer_addr = 0;
3795 tx_desc->cmd_type_offset_bsz = 0;
3796 /* move past filter desc */
3797 tx_buf++;
3798 tx_desc++;
3799 i++;
3800 if (unlikely(!i)) {
3801 i -= tx_ring->count;
3802 tx_buf = tx_ring->tx_bi;
3803 tx_desc = I40E_TX_DESC(tx_ring, 0);
3804 }
3805 /* unmap skb header data */
3806 dma_unmap_single(tx_ring->dev,
3807 dma_unmap_addr(tx_buf, dma),
3808 dma_unmap_len(tx_buf, len),
3809 DMA_TO_DEVICE);
3810 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3811 kfree(tx_buf->raw_buf);
3812
3813 tx_buf->raw_buf = NULL;
3814 tx_buf->tx_flags = 0;
3815 tx_buf->next_to_watch = NULL;
3816 dma_unmap_len_set(tx_buf, len, 0);
3817 tx_desc->buffer_addr = 0;
3818 tx_desc->cmd_type_offset_bsz = 0;
3819
3820 /* move us past the eop_desc for start of next FD desc */
3821 tx_buf++;
3822 tx_desc++;
3823 i++;
3824 if (unlikely(!i)) {
3825 i -= tx_ring->count;
3826 tx_buf = tx_ring->tx_bi;
3827 tx_desc = I40E_TX_DESC(tx_ring, 0);
3828 }
3829
3830 /* update budget accounting */
3831 budget--;
3832 } while (likely(budget));
3833
3834 i += tx_ring->count;
3835 tx_ring->next_to_clean = i;
3836
3837 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
3838 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
3839
3840 return budget > 0;
3841 }
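
/* A note on the cleaning loop above (illustrative): flow director
 * programming uses two descriptors per filter, a filter-programming
 * descriptor followed by a data descriptor, which is why tx_buf and
 * tx_desc are advanced twice per iteration. The index is biased by
 * -tx_ring->count so the wrap test is a cheap "!i" rather than a
 * compare against count, e.g. with count == 512, next_to_clean == 510:
 *
 *	i = 510 - 512 = -2
 *	first descriptor:  i becomes -1
 *	second descriptor: i becomes 0, so wrap back to -512 (slot 0)
 *	on exit: next_to_clean = i + 512
 */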
3842
3843 /**
3844 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3845 * @irq: interrupt number
3846 * @data: pointer to a q_vector
3847 **/
3848 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3849 {
3850 struct i40e_q_vector *q_vector = data;
3851 struct i40e_vsi *vsi;
3852
3853 if (!q_vector->tx.ring)
3854 return IRQ_HANDLED;
3855
3856 vsi = q_vector->tx.ring->vsi;
3857 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3858
3859 return IRQ_HANDLED;
3860 }
3861
3862 /**
3863 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3864 * @vsi: the VSI being configured
3865 * @v_idx: vector index
3866 * @qp_idx: queue pair index
3867 **/
3868 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3869 {
3870 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3871 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3872 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3873
3874 tx_ring->q_vector = q_vector;
3875 tx_ring->next = q_vector->tx.ring;
3876 q_vector->tx.ring = tx_ring;
3877 q_vector->tx.count++;
3878
3879 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
3880 if (i40e_enabled_xdp_vsi(vsi)) {
3881 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
3882
3883 xdp_ring->q_vector = q_vector;
3884 xdp_ring->next = q_vector->tx.ring;
3885 q_vector->tx.ring = xdp_ring;
3886 q_vector->tx.count++;
3887 }
3888
3889 rx_ring->q_vector = q_vector;
3890 rx_ring->next = q_vector->rx.ring;
3891 q_vector->rx.ring = rx_ring;
3892 q_vector->rx.count++;
3893 }
3894
3895 /**
3896 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3897 * @vsi: the VSI being configured
3898 *
3899 * This function maps descriptor rings to the queue-specific vectors
3900 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3901 * one vector per queue pair, but on a constrained vector budget, we
3902 * group the queue pairs as "efficiently" as possible.
3903 **/
3904 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3905 {
3906 int qp_remaining = vsi->num_queue_pairs;
3907 int q_vectors = vsi->num_q_vectors;
3908 int num_ringpairs;
3909 int v_start = 0;
3910 int qp_idx = 0;
3911
3912 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3913 * group them so there are multiple queues per vector.
3914 	 * It is also important to go through all of the available vectors so
3915 	 * that any vectors we don't use are cleared of stale ring state. This
3916 	 * is especially important when decreasing the
3917 * number of queues in use.
3918 */
3919 for (; v_start < q_vectors; v_start++) {
3920 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3921
3922 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3923
3924 q_vector->num_ringpairs = num_ringpairs;
3925
3926 q_vector->rx.count = 0;
3927 q_vector->tx.count = 0;
3928 q_vector->rx.ring = NULL;
3929 q_vector->tx.ring = NULL;
3930
3931 while (num_ringpairs--) {
3932 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3933 qp_idx++;
3934 qp_remaining--;
3935 }
3936 }
3937 }
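
/* Worked example of the distribution above (illustrative): with 10
 * queue pairs on 4 vectors, DIV_ROUND_UP front-loads the remainder:
 *
 *	vector 0: DIV_ROUND_UP(10, 4) = 3 ringpairs (qp 0-2)
 *	vector 1: DIV_ROUND_UP(7, 3)  = 3 ringpairs (qp 3-5)
 *	vector 2: DIV_ROUND_UP(4, 2)  = 2 ringpairs (qp 6-7)
 *	vector 3: DIV_ROUND_UP(2, 1)  = 2 ringpairs (qp 8-9)
 */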
3938
3939 /**
3940 * i40e_vsi_request_irq - Request IRQ from the OS
3941 * @vsi: the VSI being configured
3942 * @basename: name for the vector
3943 **/
3944 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3945 {
3946 struct i40e_pf *pf = vsi->back;
3947 int err;
3948
3949 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3950 err = i40e_vsi_request_irq_msix(vsi, basename);
3951 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3952 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3953 pf->int_name, pf);
3954 else
3955 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3956 pf->int_name, pf);
3957
3958 if (err)
3959 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3960
3961 return err;
3962 }
3963
3964 #ifdef CONFIG_NET_POLL_CONTROLLER
3965 /**
3966 * i40e_netpoll - A Polling 'interrupt' handler
3967 * @netdev: network interface device structure
3968 *
3969 * This is used by netconsole to send skbs without having to re-enable
3970 * interrupts. It's not called while the normal interrupt routine is executing.
3971 **/
3972 static void i40e_netpoll(struct net_device *netdev)
3973 {
3974 struct i40e_netdev_priv *np = netdev_priv(netdev);
3975 struct i40e_vsi *vsi = np->vsi;
3976 struct i40e_pf *pf = vsi->back;
3977 int i;
3978
3979 /* if interface is down do nothing */
3980 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3981 return;
3982
3983 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3984 for (i = 0; i < vsi->num_q_vectors; i++)
3985 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3986 } else {
3987 i40e_intr(pf->pdev->irq, netdev);
3988 }
3989 }
3990 #endif
3991
3992 #define I40E_QTX_ENA_WAIT_COUNT 50
3993
3994 /**
3995 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3996 * @pf: the PF being configured
3997 * @pf_q: the PF queue
3998 * @enable: enable or disable state of the queue
3999 *
4000 * This routine will wait for the given Tx queue of the PF to reach the
4001 * enabled or disabled state.
4002 * Returns -ETIMEDOUT if the queue fails to reach the requested state
4003 * after multiple retries, otherwise 0 on success.
4004 **/
4005 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4006 {
4007 int i;
4008 u32 tx_reg;
4009
4010 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4011 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4012 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4013 break;
4014
4015 usleep_range(10, 20);
4016 }
4017 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4018 return -ETIMEDOUT;
4019
4020 return 0;
4021 }
4022
4023 /**
4024 * i40e_control_tx_q - Start or stop a particular Tx queue
4025 * @pf: the PF structure
4026 * @pf_q: the PF queue to configure
4027 * @enable: start or stop the queue
4028 *
4029 * This function enables or disables a single queue. Note that any delay
4030 * required after the operation is expected to be handled by the caller of
4031 * this function.
4032 **/
4033 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4034 {
4035 struct i40e_hw *hw = &pf->hw;
4036 u32 tx_reg;
4037 int i;
4038
4039 /* warn the TX unit of coming changes */
4040 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4041 if (!enable)
4042 usleep_range(10, 20);
4043
4044 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4045 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4046 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4047 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4048 break;
4049 usleep_range(1000, 2000);
4050 }
4051
4052 /* Skip if the queue is already in the requested state */
4053 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4054 return;
4055
4056 /* turn on/off the queue */
4057 if (enable) {
4058 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4059 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4060 } else {
4061 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4062 }
4063
4064 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4065 }
4066
4067 /**
4068 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4069 * @seid: VSI SEID
4070 * @pf: the PF structure
4071 * @pf_q: the PF queue to configure
4072 * @is_xdp: true if the queue is used for XDP
4073 * @enable: start or stop the queue
4074 **/
4075 static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4076 bool is_xdp, bool enable)
4077 {
4078 int ret;
4079
4080 i40e_control_tx_q(pf, pf_q, enable);
4081
4082 /* wait for the change to finish */
4083 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4084 if (ret) {
4085 dev_info(&pf->pdev->dev,
4086 "VSI seid %d %sTx ring %d %sable timeout\n",
4087 seid, (is_xdp ? "XDP " : ""), pf_q,
4088 (enable ? "en" : "dis"));
4089 }
4090
4091 return ret;
4092 }
4093
4094 /**
4095 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
4096 * @vsi: the VSI being configured
4097 * @enable: start or stop the rings
4098 **/
4099 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4100 {
4101 struct i40e_pf *pf = vsi->back;
4102 int i, pf_q, ret = 0;
4103
4104 pf_q = vsi->base_queue;
4105 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4106 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4107 pf_q,
4108 false /*is xdp*/, enable);
4109 if (ret)
4110 break;
4111
4112 if (!i40e_enabled_xdp_vsi(vsi))
4113 continue;
4114
4115 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4116 pf_q + vsi->alloc_queue_pairs,
4117 true /*is xdp*/, enable);
4118 if (ret)
4119 break;
4120 }
4121
4122 return ret;
4123 }
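
/* Illustration of the queue indexing above (assumed layout): a VSI's
 * XDP Tx queues sit directly after its regular queue pairs, so with
 * base_queue == 16 and alloc_queue_pairs == 8 the LAN Tx queues are
 * PF queues 16-23 and the corresponding XDP Tx queues are 24-31.
 */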
4124
4125 /**
4126 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4127 * @pf: the PF being configured
4128 * @pf_q: the PF queue
4129 * @enable: enable or disable state of the queue
4130 *
4131 * This routine will wait for the given Rx queue of the PF to reach the
4132 * enabled or disabled state.
4133 * Returns -ETIMEDOUT if the queue fails to reach the requested state
4134 * after multiple retries, otherwise 0 on success.
4135 **/
4136 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4137 {
4138 int i;
4139 u32 rx_reg;
4140
4141 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4142 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4143 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4144 break;
4145
4146 usleep_range(10, 20);
4147 }
4148 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4149 return -ETIMEDOUT;
4150
4151 return 0;
4152 }
4153
4154 /**
4155 * i40e_control_rx_q - Start or stop a particular Rx queue
4156 * @pf: the PF structure
4157 * @pf_q: the PF queue to configure
4158 * @enable: start or stop the queue
4159 *
4160 * This function enables or disables a single queue. Note that any delay
4161 * required after the operation is expected to be handled by the caller of
4162 * this function.
4163 **/
4164 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4165 {
4166 struct i40e_hw *hw = &pf->hw;
4167 u32 rx_reg;
4168 int i;
4169
4170 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4171 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4172 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4173 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4174 break;
4175 usleep_range(1000, 2000);
4176 }
4177
4178 /* Skip if the queue is already in the requested state */
4179 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4180 return;
4181
4182 /* turn on/off the queue */
4183 if (enable)
4184 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4185 else
4186 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4187
4188 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4189 }
4190
4191 /**
4192 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
4193 * @vsi: the VSI being configured
4194 * @enable: start or stop the rings
4195 **/
4196 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4197 {
4198 struct i40e_pf *pf = vsi->back;
4199 int i, pf_q, ret = 0;
4200
4201 pf_q = vsi->base_queue;
4202 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4203 i40e_control_rx_q(pf, pf_q, enable);
4204
4205 /* wait for the change to finish */
4206 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4207 if (ret) {
4208 dev_info(&pf->pdev->dev,
4209 "VSI seid %d Rx ring %d %sable timeout\n",
4210 vsi->seid, pf_q, (enable ? "en" : "dis"));
4211 break;
4212 }
4213 }
4214
4215 	/* Due to HW errata, on Rx disable only, the register can indicate done
4216 	 * before it really is. Wait 50ms to be sure.
4217 	 */
4218 if (!enable)
4219 mdelay(50);
4220
4221 return ret;
4222 }
4223
4224 /**
4225 * i40e_vsi_start_rings - Start a VSI's rings
4226 * @vsi: the VSI being configured
4227 **/
4228 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4229 {
4230 int ret = 0;
4231
4232 /* do rx first for enable and last for disable */
4233 ret = i40e_vsi_control_rx(vsi, true);
4234 if (ret)
4235 return ret;
4236 ret = i40e_vsi_control_tx(vsi, true);
4237
4238 return ret;
4239 }
4240
4241 /**
4242 * i40e_vsi_stop_rings - Stop a VSI's rings
4243 * @vsi: the VSI being configured
4244 **/
4245 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4246 {
4247 /* When port TX is suspended, don't wait */
4248 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4249 return i40e_vsi_stop_rings_no_wait(vsi);
4250
4251 	/* Rx is started first and stopped last. Ignore the return
4252 	 * values, we need to shut down whatever we can.
4253 	 */
4254 i40e_vsi_control_tx(vsi, false);
4255 i40e_vsi_control_rx(vsi, false);
4256 }
4257
4258 /**
4259 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4260 * @vsi: the VSI being shutdown
4261 *
4262 * This function stops all the rings for a VSI but does not delay to verify
4263 * that rings have been disabled. It is expected that the caller is shutting
4264 * down multiple VSIs at once and will delay together for all the VSIs after
4265 * initiating the shutdown. This is particularly useful for shutting down lots
4266 * of VFs together. Otherwise, a large delay can be incurred while configuring
4267 * each VSI in serial.
4268 **/
4269 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4270 {
4271 struct i40e_pf *pf = vsi->back;
4272 int i, pf_q;
4273
4274 pf_q = vsi->base_queue;
4275 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4276 i40e_control_tx_q(pf, pf_q, false);
4277 i40e_control_rx_q(pf, pf_q, false);
4278 }
4279 }
4280
4281 /**
4282 * i40e_vsi_free_irq - Free the irq association with the OS
4283 * @vsi: the VSI being configured
4284 **/
4285 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4286 {
4287 struct i40e_pf *pf = vsi->back;
4288 struct i40e_hw *hw = &pf->hw;
4289 int base = vsi->base_vector;
4290 u32 val, qp;
4291 int i;
4292
4293 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4294 if (!vsi->q_vectors)
4295 return;
4296
4297 if (!vsi->irqs_ready)
4298 return;
4299
4300 vsi->irqs_ready = false;
4301 for (i = 0; i < vsi->num_q_vectors; i++) {
4302 int irq_num;
4303 u16 vector;
4304
4305 vector = i + base;
4306 irq_num = pf->msix_entries[vector].vector;
4307
4308 /* free only the irqs that were actually requested */
4309 if (!vsi->q_vectors[i] ||
4310 !vsi->q_vectors[i]->num_ringpairs)
4311 continue;
4312
4313 /* clear the affinity notifier in the IRQ descriptor */
4314 irq_set_affinity_notifier(irq_num, NULL);
4315 /* remove our suggested affinity mask for this IRQ */
4316 irq_set_affinity_hint(irq_num, NULL);
4317 synchronize_irq(irq_num);
4318 free_irq(irq_num, vsi->q_vectors[i]);
4319
4320 /* Tear down the interrupt queue link list
4321 *
4322 * We know that they come in pairs and always
4323 * the Rx first, then the Tx. To clear the
4324 * link list, stick the EOL value into the
4325 * next_q field of the registers.
4326 */
4327 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4328 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4329 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4330 val |= I40E_QUEUE_END_OF_LIST
4331 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4332 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4333
4334 while (qp != I40E_QUEUE_END_OF_LIST) {
4335 u32 next;
4336
4337 val = rd32(hw, I40E_QINT_RQCTL(qp));
4338
4339 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4340 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4341 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4342 I40E_QINT_RQCTL_INTEVENT_MASK);
4343
4344 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4345 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4346
4347 wr32(hw, I40E_QINT_RQCTL(qp), val);
4348
4349 val = rd32(hw, I40E_QINT_TQCTL(qp));
4350
4351 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4352 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4353
4354 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4355 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4356 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4357 I40E_QINT_TQCTL_INTEVENT_MASK);
4358
4359 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4360 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4361
4362 wr32(hw, I40E_QINT_TQCTL(qp), val);
4363 qp = next;
4364 }
4365 }
4366 } else {
4367 free_irq(pf->pdev->irq, pf);
4368
4369 val = rd32(hw, I40E_PFINT_LNKLST0);
4370 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4371 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4372 val |= I40E_QUEUE_END_OF_LIST
4373 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4374 wr32(hw, I40E_PFINT_LNKLST0, val);
4375
4376 val = rd32(hw, I40E_QINT_RQCTL(qp));
4377 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4378 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4379 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4380 I40E_QINT_RQCTL_INTEVENT_MASK);
4381
4382 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4383 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4384
4385 wr32(hw, I40E_QINT_RQCTL(qp), val);
4386
4387 val = rd32(hw, I40E_QINT_TQCTL(qp));
4388
4389 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4390 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4391 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4392 I40E_QINT_TQCTL_INTEVENT_MASK);
4393
4394 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4395 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4396
4397 wr32(hw, I40E_QINT_TQCTL(qp), val);
4398 }
4399 }
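
/* Illustration of the teardown above: the HW chains each vector's
 * queue causes through per-queue NEXTQ_INDX fields, Rx then Tx for
 * every pair, e.g. (queue numbers hypothetical)
 *
 *	PFINT_LNKLSTN[v] -> rxq5/txq5 -> rxq6/txq6 -> END_OF_LIST
 *
 * The loops above read the first queue from the link-list register,
 * write END_OF_LIST there, then walk the RQCTL/TQCTL registers,
 * clearing the cause-enable bits and terminating each NEXTQ_INDX
 * on the way out.
 */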
4400
4401 /**
4402 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4403 * @vsi: the VSI being configured
4404 * @v_idx: Index of vector to be freed
4405 *
4406 * This function frees the memory allocated to the q_vector. In addition if
4407 * NAPI is enabled it will delete any references to the NAPI struct prior
4408 * to freeing the q_vector.
4409 **/
4410 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4411 {
4412 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4413 struct i40e_ring *ring;
4414
4415 if (!q_vector)
4416 return;
4417
4418 /* disassociate q_vector from rings */
4419 i40e_for_each_ring(ring, q_vector->tx)
4420 ring->q_vector = NULL;
4421
4422 i40e_for_each_ring(ring, q_vector->rx)
4423 ring->q_vector = NULL;
4424
4425 /* only VSI w/ an associated netdev is set up w/ NAPI */
4426 if (vsi->netdev)
4427 netif_napi_del(&q_vector->napi);
4428
4429 vsi->q_vectors[v_idx] = NULL;
4430
4431 kfree_rcu(q_vector, rcu);
4432 }
4433
4434 /**
4435 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4436 * @vsi: the VSI being un-configured
4437 *
4438 * This frees the memory allocated to the q_vectors and
4439 * deletes references to the NAPI struct.
4440 **/
4441 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4442 {
4443 int v_idx;
4444
4445 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4446 i40e_free_q_vector(vsi, v_idx);
4447 }
4448
4449 /**
4450 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4451 * @pf: board private structure
4452 **/
4453 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4454 {
4455 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4456 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4457 pci_disable_msix(pf->pdev);
4458 kfree(pf->msix_entries);
4459 pf->msix_entries = NULL;
4460 kfree(pf->irq_pile);
4461 pf->irq_pile = NULL;
4462 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4463 pci_disable_msi(pf->pdev);
4464 }
4465 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4466 }
4467
4468 /**
4469 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4470 * @pf: board private structure
4471 *
4472 * We go through and clear interrupt specific resources and reset the structure
4473 * to pre-load conditions
4474 **/
4475 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4476 {
4477 int i;
4478
4479 i40e_stop_misc_vector(pf);
4480 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4481 synchronize_irq(pf->msix_entries[0].vector);
4482 free_irq(pf->msix_entries[0].vector, pf);
4483 }
4484
4485 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4486 I40E_IWARP_IRQ_PILE_ID);
4487
4488 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4489 for (i = 0; i < pf->num_alloc_vsi; i++)
4490 if (pf->vsi[i])
4491 i40e_vsi_free_q_vectors(pf->vsi[i]);
4492 i40e_reset_interrupt_capability(pf);
4493 }
4494
4495 /**
4496 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4497 * @vsi: the VSI being configured
4498 **/
4499 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4500 {
4501 int q_idx;
4502
4503 if (!vsi->netdev)
4504 return;
4505
4506 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4507 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4508
4509 if (q_vector->rx.ring || q_vector->tx.ring)
4510 napi_enable(&q_vector->napi);
4511 }
4512 }
4513
4514 /**
4515 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4516 * @vsi: the VSI being configured
4517 **/
4518 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4519 {
4520 int q_idx;
4521
4522 if (!vsi->netdev)
4523 return;
4524
4525 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4526 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4527
4528 if (q_vector->rx.ring || q_vector->tx.ring)
4529 napi_disable(&q_vector->napi);
4530 }
4531 }
4532
4533 /**
4534 * i40e_vsi_close - Shut down a VSI
4535 * @vsi: the vsi to be quelled
4536 **/
4537 static void i40e_vsi_close(struct i40e_vsi *vsi)
4538 {
4539 	struct i40e_pf *pf = vsi->back;

4540 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4541 i40e_down(vsi);
4542 i40e_vsi_free_irq(vsi);
4543 i40e_vsi_free_tx_resources(vsi);
4544 i40e_vsi_free_rx_resources(vsi);
4545 vsi->current_netdev_flags = 0;
4546 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
4547 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4548 pf->flags |= I40E_FLAG_CLIENT_RESET;
4549 }
4550
4551 /**
4552 * i40e_quiesce_vsi - Pause a given VSI
4553 * @vsi: the VSI being paused
4554 **/
4555 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4556 {
4557 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4558 return;
4559
4560 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4561 if (vsi->netdev && netif_running(vsi->netdev))
4562 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4563 else
4564 i40e_vsi_close(vsi);
4565 }
4566
4567 /**
4568 * i40e_unquiesce_vsi - Resume a given VSI
4569 * @vsi: the VSI being resumed
4570 **/
4571 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4572 {
4573 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4574 return;
4575
4576 if (vsi->netdev && netif_running(vsi->netdev))
4577 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4578 else
4579 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4580 }
4581
4582 /**
4583 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4584 * @pf: the PF
4585 **/
4586 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4587 {
4588 int v;
4589
4590 for (v = 0; v < pf->num_alloc_vsi; v++) {
4591 if (pf->vsi[v])
4592 i40e_quiesce_vsi(pf->vsi[v]);
4593 }
4594 }
4595
4596 /**
4597 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4598 * @pf: the PF
4599 **/
4600 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4601 {
4602 int v;
4603
4604 for (v = 0; v < pf->num_alloc_vsi; v++) {
4605 if (pf->vsi[v])
4606 i40e_unquiesce_vsi(pf->vsi[v]);
4607 }
4608 }
4609
4610 /**
4611 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4612 * @vsi: the VSI being configured
4613 *
4614 * Wait until all queues on a given VSI have been disabled.
4615 **/
4616 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4617 {
4618 struct i40e_pf *pf = vsi->back;
4619 int i, pf_q, ret;
4620
4621 pf_q = vsi->base_queue;
4622 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4623 /* Check and wait for the Tx queue */
4624 ret = i40e_pf_txq_wait(pf, pf_q, false);
4625 if (ret) {
4626 dev_info(&pf->pdev->dev,
4627 "VSI seid %d Tx ring %d disable timeout\n",
4628 vsi->seid, pf_q);
4629 return ret;
4630 }
4631
4632 if (!i40e_enabled_xdp_vsi(vsi))
4633 goto wait_rx;
4634
4635 /* Check and wait for the XDP Tx queue */
4636 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4637 false);
4638 if (ret) {
4639 dev_info(&pf->pdev->dev,
4640 "VSI seid %d XDP Tx ring %d disable timeout\n",
4641 vsi->seid, pf_q);
4642 return ret;
4643 }
4644 wait_rx:
4645 /* Check and wait for the Rx queue */
4646 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4647 if (ret) {
4648 dev_info(&pf->pdev->dev,
4649 "VSI seid %d Rx ring %d disable timeout\n",
4650 vsi->seid, pf_q);
4651 return ret;
4652 }
4653 }
4654
4655 return 0;
4656 }
4657
4658 #ifdef CONFIG_I40E_DCB
4659 /**
4660 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4661 * @pf: the PF
4662 *
4663 * This function waits for the queues to be in disabled state for all the
4664 * VSIs that are managed by this PF.
4665 **/
4666 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4667 {
4668 int v, ret = 0;
4669
4670 	for (v = 0; v < pf->num_alloc_vsi; v++) {
4671 if (pf->vsi[v]) {
4672 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4673 if (ret)
4674 break;
4675 }
4676 }
4677
4678 return ret;
4679 }
4680
4681 #endif
4682
4683 /**
4684 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
4685 * @q_idx: TX queue number
4686 * @vsi: Pointer to VSI struct
4687 *
4688 * This function checks the specified queue of the given VSI for a hung
4689 * condition. We proactively detect hung TX queues by checking if interrupts
4690 * are disabled but there are still pending descriptors. If the queue
4691 * appears hung, attempt to recover by triggering a SW interrupt.
4692 **/
4693 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4694 {
4695 struct i40e_ring *tx_ring = NULL;
4696 struct i40e_pf *pf;
4697 u32 val, tx_pending;
4698 int i;
4699
4700 pf = vsi->back;
4701
4702 /* now that we have an index, find the tx_ring struct */
4703 for (i = 0; i < vsi->num_queue_pairs; i++) {
4704 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4705 if (q_idx == vsi->tx_rings[i]->queue_index) {
4706 tx_ring = vsi->tx_rings[i];
4707 break;
4708 }
4709 }
4710 }
4711
4712 if (!tx_ring)
4713 return;
4714
4715 /* Read interrupt register */
4716 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4717 val = rd32(&pf->hw,
4718 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4719 tx_ring->vsi->base_vector - 1));
4720 else
4721 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4722
4723 tx_pending = i40e_get_tx_pending(tx_ring);
4724
4725 	/* Interrupts are disabled and TX pending is non-zero,
4726 	 * trigger the SW interrupt (don't wait). Worst case
4727 	 * there will be one extra interrupt which may find
4728 	 * no work to do because the queues were already cleaned.
4729 	 */
4730 if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
4731 i40e_force_wb(vsi, tx_ring->q_vector);
4732 }
4733
4734 /**
4735 * i40e_detect_recover_hung - Function to detect and recover hung_queues
4736 * @pf: pointer to PF struct
4737 *
4738 * The LAN VSI has a netdev and the netdev has TX queues. This function
4739 * checks each of those TX queues for a hung condition and triggers
4740 * recovery by issuing a SW interrupt.
4741 **/
4742 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4743 {
4744 struct net_device *netdev;
4745 struct i40e_vsi *vsi;
4746 unsigned int i;
4747
4748 /* Only for LAN VSI */
4749 vsi = pf->vsi[pf->lan_vsi];
4750
4751 if (!vsi)
4752 return;
4753
4754 	/* Make sure VSI state is not DOWN or RECOVERY_PENDING */
4755 if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
4756 test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
4757 return;
4758
4759 /* Make sure type is MAIN VSI */
4760 if (vsi->type != I40E_VSI_MAIN)
4761 return;
4762
4763 netdev = vsi->netdev;
4764 if (!netdev)
4765 return;
4766
4767 /* Bail out if netif_carrier is not OK */
4768 if (!netif_carrier_ok(netdev))
4769 return;
4770
4771 	/* Go through the netdev's TX queues */
4772 for (i = 0; i < netdev->num_tx_queues; i++) {
4773 struct netdev_queue *q;
4774
4775 q = netdev_get_tx_queue(netdev, i);
4776 if (q)
4777 i40e_detect_recover_hung_queue(i, vsi);
4778 }
4779 }
4780
4781 /**
4782 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4783 * @pf: pointer to PF
4784 *
4785 * Get TC map for iSCSI PF type that will include iSCSI TC
4786 * and LAN TC.
4787 **/
4788 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4789 {
4790 struct i40e_dcb_app_priority_table app;
4791 struct i40e_hw *hw = &pf->hw;
4792 u8 enabled_tc = 1; /* TC0 is always enabled */
4793 u8 tc, i;
4794 /* Get the iSCSI APP TLV */
4795 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4796
4797 for (i = 0; i < dcbcfg->numapps; i++) {
4798 app = dcbcfg->app[i];
4799 if (app.selector == I40E_APP_SEL_TCPIP &&
4800 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4801 tc = dcbcfg->etscfg.prioritytable[app.priority];
4802 enabled_tc |= BIT(tc);
4803 break;
4804 }
4805 }
4806
4807 return enabled_tc;
4808 }
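
/* Example (illustrative values): if the DCBX config carries an iSCSI
 * APP TLV with priority 4, and the ETS priority table maps user
 * priority 4 to TC2, the returned map is 1 | BIT(2) = 0x5, i.e. TC0
 * (always enabled) plus the iSCSI TC.
 */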
4809
4810 /**
4811 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4812 * @dcbcfg: the corresponding DCBx configuration structure
4813 *
4814 * Return the number of TCs from given DCBx configuration
4815 **/
4816 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4817 {
4818 int i, tc_unused = 0;
4819 u8 num_tc = 0;
4820 u8 ret = 0;
4821
4822 /* Scan the ETS Config Priority Table to find
4823 * traffic class enabled for a given priority
4824 * and create a bitmask of enabled TCs
4825 */
4826 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4827 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
4828
4829 /* Now scan the bitmask to check for
4830 * contiguous TCs starting with TC0
4831 */
4832 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4833 if (num_tc & BIT(i)) {
4834 if (!tc_unused) {
4835 ret++;
4836 } else {
4837 pr_err("Non-contiguous TC - Disabling DCB\n");
4838 return 1;
4839 }
4840 } else {
4841 tc_unused = 1;
4842 }
4843 }
4844
4845 /* There is always at least TC0 */
4846 if (!ret)
4847 ret = 1;
4848
4849 return ret;
4850 }
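
/* Worked example (illustrative): an ETS priority table of
 * {0, 0, 1, 1, 2, 0, 0, 0} builds the bitmask 0b111, three contiguous
 * TCs, so the function returns 3. A table of {0, 2, 0, 0, 0, 0, 0, 0}
 * builds 0b101; TC1 is unused below TC2, so DCB is disabled and the
 * function returns 1.
 */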
4851
4852 /**
4853 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4854 * @dcbcfg: the corresponding DCBx configuration structure
4855 *
4856 * Query the current DCB configuration and return a bitmap of the
4857 * traffic classes enabled by the given DCBX config
4858 **/
4859 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4860 {
4861 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4862 u8 enabled_tc = 1;
4863 u8 i;
4864
4865 for (i = 0; i < num_tc; i++)
4866 enabled_tc |= BIT(i);
4867
4868 return enabled_tc;
4869 }
4870
4871 /**
4872 * i40e_pf_get_num_tc - Get the number of enabled traffic classes for PF
4873 * @pf: PF being queried
4874 *
4875 * Return number of traffic classes enabled for the given PF
4876 **/
4877 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4878 {
4879 struct i40e_hw *hw = &pf->hw;
4880 u8 i, enabled_tc = 1;
4881 u8 num_tc = 0;
4882 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4883
4884 /* If DCB is not enabled then always in single TC */
4885 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4886 return 1;
4887
4888 /* SFP mode will be enabled for all TCs on port */
4889 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4890 return i40e_dcb_get_num_tc(dcbcfg);
4891
4892 /* MFP mode return count of enabled TCs for this PF */
4893 if (pf->hw.func_caps.iscsi)
4894 enabled_tc = i40e_get_iscsi_tc_map(pf);
4895 else
4896 return 1; /* Only TC0 */
4897
4898 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4899 if (enabled_tc & BIT(i))
4900 num_tc++;
4901 }
4902 return num_tc;
4903 }
4904
4905 /**
4906 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
4907 * @pf: PF being queried
4908 *
4909 * Return a bitmap for enabled traffic classes for this PF.
4910 **/
4911 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4912 {
4913 /* If DCB is not enabled for this PF then just return default TC */
4914 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4915 return I40E_DEFAULT_TRAFFIC_CLASS;
4916
4917 /* SFP mode we want PF to be enabled for all TCs */
4918 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4919 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4920
4921 /* MFP enabled and iSCSI PF type */
4922 if (pf->hw.func_caps.iscsi)
4923 return i40e_get_iscsi_tc_map(pf);
4924 else
4925 return I40E_DEFAULT_TRAFFIC_CLASS;
4926 }
4927
4928 /**
4929 * i40e_vsi_get_bw_info - Query VSI BW Information
4930 * @vsi: the VSI being queried
4931 *
4932 * Returns 0 on success, negative value on failure
4933 **/
4934 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4935 {
4936 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4937 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4938 struct i40e_pf *pf = vsi->back;
4939 struct i40e_hw *hw = &pf->hw;
4940 i40e_status ret;
4941 u32 tc_bw_max;
4942 int i;
4943
4944 /* Get the VSI level BW configuration */
4945 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4946 if (ret) {
4947 dev_info(&pf->pdev->dev,
4948 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4949 i40e_stat_str(&pf->hw, ret),
4950 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4951 return -EINVAL;
4952 }
4953
4954 /* Get the VSI level BW configuration per TC */
4955 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4956 NULL);
4957 if (ret) {
4958 dev_info(&pf->pdev->dev,
4959 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4960 i40e_stat_str(&pf->hw, ret),
4961 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4962 return -EINVAL;
4963 }
4964
4965 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4966 dev_info(&pf->pdev->dev,
4967 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4968 bw_config.tc_valid_bits,
4969 bw_ets_config.tc_valid_bits);
4970 /* Still continuing */
4971 }
4972
4973 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4974 vsi->bw_max_quanta = bw_config.max_bw;
4975 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4976 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4977 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4978 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4979 vsi->bw_ets_limit_credits[i] =
4980 le16_to_cpu(bw_ets_config.credits[i]);
4981 /* 3 bits out of 4 for each TC */
4982 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4983 }
4984
4985 return 0;
4986 }
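
/* Illustration of the tc_bw_max unpacking above: the two little-endian
 * 16-bit words are combined into one 32-bit value carrying a 4-bit
 * field per TC, of which only the low 3 bits are the max quanta, e.g.
 * (values hypothetical)
 *
 *	tc_bw_max[0] = 0x4321, tc_bw_max[1] = 0x8765
 *	combined     = 0x87654321
 *	TC0 = (0x87654321 >> 0)  & 0x7 = 1
 *	TC1 = (0x87654321 >> 4)  & 0x7 = 2
 *	TC7 = (0x87654321 >> 28) & 0x7 = 0 (low 3 bits of 0x8)
 */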
4987
4988 /**
4989 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4990 * @vsi: the VSI being configured
4991 * @enabled_tc: TC bitmap
4992 * @bw_credits: BW shared credits per TC
4993 *
4994 * Returns 0 on success, negative value on failure
4995 **/
4996 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4997 u8 *bw_share)
4998 {
4999 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5000 i40e_status ret;
5001 int i;
5002
5003 bw_data.tc_valid_bits = enabled_tc;
5004 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5005 bw_data.tc_bw_credits[i] = bw_share[i];
5006
5007 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
5008 NULL);
5009 if (ret) {
5010 dev_info(&vsi->back->pdev->dev,
5011 "AQ command Config VSI BW allocation per TC failed = %d\n",
5012 vsi->back->hw.aq.asq_last_status);
5013 return -EINVAL;
5014 }
5015
5016 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5017 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5018
5019 return 0;
5020 }
5021
5022 /**
5023 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5024 * @vsi: the VSI being configured
5025 * @enabled_tc: TC map to be enabled
5026 *
5027 **/
5028 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5029 {
5030 struct net_device *netdev = vsi->netdev;
5031 struct i40e_pf *pf = vsi->back;
5032 struct i40e_hw *hw = &pf->hw;
5033 u8 netdev_tc = 0;
5034 int i;
5035 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5036
5037 if (!netdev)
5038 return;
5039
5040 if (!enabled_tc) {
5041 netdev_reset_tc(netdev);
5042 return;
5043 }
5044
5045 /* Set up actual enabled TCs on the VSI */
5046 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5047 return;
5048
5049 /* set per TC queues for the VSI */
5050 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5051 /* Only set TC queues for enabled tcs
5052 *
5053 		 * e.g. For a VSI that has TC0 and TC3 enabled the
5054 		 * enabled_tc bitmap would be 0x09 (binary 1001); the driver
5055 		 * will set numtc for the netdev to 2, which the netdev
5056 		 * layer will reference as TC 0 and 1.
5057 */
5058 if (vsi->tc_config.enabled_tc & BIT(i))
5059 netdev_set_tc_queue(netdev,
5060 vsi->tc_config.tc_info[i].netdev_tc,
5061 vsi->tc_config.tc_info[i].qcount,
5062 vsi->tc_config.tc_info[i].qoffset);
5063 }
5064
5065 /* Assign UP2TC map for the VSI */
5066 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5067 /* Get the actual TC# for the UP */
5068 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5069 /* Get the mapped netdev TC# for the UP */
5070 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5071 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5072 }
5073 }
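
/* Example of the resulting netdev mapping (illustrative): a VSI with
 * TC0 and TC3 enabled and 8 queues per TC is exposed to the stack as
 * two netdev TCs:
 *
 *	netdev_set_num_tc(netdev, 2);
 *	netdev_set_tc_queue(netdev, 0, 8, 0);	(TC0: queues 0-7)
 *	netdev_set_tc_queue(netdev, 1, 8, 8);	(TC3: queues 8-15)
 *
 * and any user priority the ETS table maps to TC3 is given netdev
 * prio->tc map 1 via netdev_set_prio_tc_map().
 */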
5074
5075 /**
5076 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5077 * @vsi: the VSI being configured
5078 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5079 **/
5080 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5081 struct i40e_vsi_context *ctxt)
5082 {
5083 	/* copy just the sections touched, not the entire info,
5084 * since not all sections are valid as returned by
5085 * update vsi params
5086 */
5087 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5088 memcpy(&vsi->info.queue_mapping,
5089 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5090 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5091 sizeof(vsi->info.tc_mapping));
5092 }
5093
5094 /**
5095 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5096 * @vsi: VSI to be configured
5097 * @enabled_tc: TC bitmap
5098 *
5099 * This configures a particular VSI for TCs that are mapped to the
5100 * given TC bitmap. It uses the default bandwidth share for each TC
5101 * across all VSIs.
5102 *
5103 * NOTE:
5104 * It is expected that the VSI queues have been quiesced before calling
5105 * this function.
5106 **/
5107 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5108 {
5109 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5110 struct i40e_vsi_context ctxt;
5111 int ret = 0;
5112 int i;
5113
5114 	/* Nothing to do if the requested TC map matches the current one */
5115 if (vsi->tc_config.enabled_tc == enabled_tc)
5116 return ret;
5117
5118 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5119 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5120 if (enabled_tc & BIT(i))
5121 bw_share[i] = 1;
5122 }
5123
5124 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5125 if (ret) {
5126 dev_info(&vsi->back->pdev->dev,
5127 "Failed configuring TC map %d for VSI %d\n",
5128 enabled_tc, vsi->seid);
5129 goto out;
5130 }
5131
5132 /* Update Queue Pairs Mapping for currently enabled UPs */
5133 ctxt.seid = vsi->seid;
5134 ctxt.pf_num = vsi->back->hw.pf_id;
5135 ctxt.vf_num = 0;
5136 ctxt.uplink_seid = vsi->uplink_seid;
5137 ctxt.info = vsi->info;
5138 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5139
5140 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5141 ctxt.info.valid_sections |=
5142 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5143 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5144 }
5145
5146 /* Update the VSI after updating the VSI queue-mapping information */
5147 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
5148 if (ret) {
5149 dev_info(&vsi->back->pdev->dev,
5150 "Update vsi tc config failed, err %s aq_err %s\n",
5151 i40e_stat_str(&vsi->back->hw, ret),
5152 i40e_aq_str(&vsi->back->hw,
5153 vsi->back->hw.aq.asq_last_status));
5154 goto out;
5155 }
5156 /* update the local VSI info with updated queue map */
5157 i40e_vsi_update_queue_map(vsi, &ctxt);
5158 vsi->info.valid_sections = 0;
5159
5160 /* Update current VSI BW information */
5161 ret = i40e_vsi_get_bw_info(vsi);
5162 if (ret) {
5163 dev_info(&vsi->back->pdev->dev,
5164 "Failed updating vsi bw info, err %s aq_err %s\n",
5165 i40e_stat_str(&vsi->back->hw, ret),
5166 i40e_aq_str(&vsi->back->hw,
5167 vsi->back->hw.aq.asq_last_status));
5168 goto out;
5169 }
5170
5171 /* Update the netdev TC setup */
5172 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5173 out:
5174 return ret;
5175 }
5176
5177 /**
5178 * i40e_veb_config_tc - Configure TCs for given VEB
5179 * @veb: given VEB
5180 * @enabled_tc: TC bitmap
5181 *
5182 * Configures given TC bitmap for VEB (switching) element
5183 **/
5184 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
5185 {
5186 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
5187 struct i40e_pf *pf = veb->pf;
5188 int ret = 0;
5189 int i;
5190
5191 	/* Nothing to do if no TCs are requested or they are already enabled */
5192 if (!enabled_tc || veb->enabled_tc == enabled_tc)
5193 return ret;
5194
5195 bw_data.tc_valid_bits = enabled_tc;
5196 /* bw_data.absolute_credits is not set (relative) */
5197
5198 /* Enable ETS TCs with equal BW Share for now */
5199 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5200 if (enabled_tc & BIT(i))
5201 bw_data.tc_bw_share_credits[i] = 1;
5202 }
5203
5204 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
5205 &bw_data, NULL);
5206 if (ret) {
5207 dev_info(&pf->pdev->dev,
5208 "VEB bw config failed, err %s aq_err %s\n",
5209 i40e_stat_str(&pf->hw, ret),
5210 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5211 goto out;
5212 }
5213
5214 /* Update the BW information */
5215 ret = i40e_veb_get_bw_info(veb);
5216 if (ret) {
5217 dev_info(&pf->pdev->dev,
5218 "Failed getting veb bw config, err %s aq_err %s\n",
5219 i40e_stat_str(&pf->hw, ret),
5220 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5221 }
5222
5223 out:
5224 return ret;
5225 }
5226
5227 #ifdef CONFIG_I40E_DCB
5228 /**
5229 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
5230 * @pf: PF struct
5231 *
5232 * Reconfigure VEB/VSIs on a given PF. It is assumed that
5233 * the caller has quiesced all the VSIs before calling
5234 * this function.
5235 **/
5236 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
5237 {
5238 u8 tc_map = 0;
5239 int ret;
5240 u8 v;
5241
5242 /* Enable the TCs available on PF to all VEBs */
5243 tc_map = i40e_pf_get_tc_map(pf);
5244 for (v = 0; v < I40E_MAX_VEB; v++) {
5245 if (!pf->veb[v])
5246 continue;
5247 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
5248 if (ret) {
5249 dev_info(&pf->pdev->dev,
5250 "Failed configuring TC for VEB seid=%d\n",
5251 pf->veb[v]->seid);
5252 			/* Will try to configure as many components as possible */
5253 }
5254 }
5255
5256 /* Update each VSI */
5257 for (v = 0; v < pf->num_alloc_vsi; v++) {
5258 if (!pf->vsi[v])
5259 continue;
5260
5261 /* - Enable all TCs for the LAN VSI
5262 * - For all others keep them at TC0 for now
5263 */
5264 if (v == pf->lan_vsi)
5265 tc_map = i40e_pf_get_tc_map(pf);
5266 else
5267 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
5268
5269 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
5270 if (ret) {
5271 dev_info(&pf->pdev->dev,
5272 "Failed configuring TC for VSI seid=%d\n",
5273 pf->vsi[v]->seid);
5274 			/* Will try to configure as many components as possible */
5275 } else {
5276 /* Re-configure VSI vectors based on updated TC map */
5277 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
5278 if (pf->vsi[v]->netdev)
5279 i40e_dcbnl_set_all(pf->vsi[v]);
5280 }
5281 }
5282 }
5283
5284 /**
5285 * i40e_resume_port_tx - Resume port Tx
5286 * @pf: PF struct
5287 *
5288 * Resume a port's Tx and issue a PF reset in case of failure to
5289 * resume.
5290 **/
5291 static int i40e_resume_port_tx(struct i40e_pf *pf)
5292 {
5293 struct i40e_hw *hw = &pf->hw;
5294 int ret;
5295
5296 ret = i40e_aq_resume_port_tx(hw, NULL);
5297 if (ret) {
5298 dev_info(&pf->pdev->dev,
5299 "Resume Port Tx failed, err %s aq_err %s\n",
5300 i40e_stat_str(&pf->hw, ret),
5301 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5302 /* Schedule PF reset to recover */
5303 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
5304 i40e_service_event_schedule(pf);
5305 }
5306
5307 return ret;
5308 }
5309
5310 /**
5311 * i40e_init_pf_dcb - Initialize DCB configuration
5312 * @pf: PF being configured
5313 *
5314 * Query the current DCB configuration and cache it
5315 * in the hardware structure
5316 **/
5317 static int i40e_init_pf_dcb(struct i40e_pf *pf)
5318 {
5319 struct i40e_hw *hw = &pf->hw;
5320 int err = 0;
5321
5322 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
5323 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
5324 goto out;
5325
5326 /* Get the initial DCB configuration */
5327 err = i40e_init_dcb(hw);
5328 if (!err) {
5329 /* Device/Function is not DCBX capable */
5330 if ((!hw->func_caps.dcb) ||
5331 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
5332 dev_info(&pf->pdev->dev,
5333 "DCBX offload is not supported or is disabled for this PF.\n");
5334 } else {
5335 			/* When the status is not DISABLED, DCBX is managed by the FW */
5336 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
5337 DCB_CAP_DCBX_VER_IEEE;
5338
5339 pf->flags |= I40E_FLAG_DCB_CAPABLE;
5340 			/* Enable DCB tagging only when more than one TC is in use,
5341 			 * or explicitly disable it if only one TC is in use
5342 */
5343 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5344 pf->flags |= I40E_FLAG_DCB_ENABLED;
5345 else
5346 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5347 dev_dbg(&pf->pdev->dev,
5348 "DCBX offload is supported for this PF.\n");
5349 }
5350 } else {
5351 dev_info(&pf->pdev->dev,
5352 "Query for DCB configuration failed, err %s aq_err %s\n",
5353 i40e_stat_str(&pf->hw, err),
5354 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5355 }
5356
5357 out:
5358 return err;
5359 }
5360 #endif /* CONFIG_I40E_DCB */
5361 #define SPEED_SIZE 14
5362 #define FC_SIZE 8
5363 /**
5364 * i40e_print_link_message - print link up or down
5365 * @vsi: the VSI for which link needs a message
 * @isup: true if link is up, false otherwise
5366 **/
5367 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
5368 {
5369 enum i40e_aq_link_speed new_speed;
5370 char *speed = "Unknown";
5371 char *fc = "Unknown";
5372 char *fec = "";
5373 char *req_fec = "";
5374 char *an = "";
5375
5376 new_speed = vsi->back->hw.phy.link_info.link_speed;
5377
5378 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
5379 return;
5380 vsi->current_isup = isup;
5381 vsi->current_speed = new_speed;
5382 if (!isup) {
5383 netdev_info(vsi->netdev, "NIC Link is Down\n");
5384 return;
5385 }
5386
5387 /* Warn user if link speed on NPAR enabled partition is not at
5388 	 * least 10Gbps
5389 */
5390 if (vsi->back->hw.func_caps.npar_enable &&
5391 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5392 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5393 netdev_warn(vsi->netdev,
5394 "The partition detected link speed that is less than 10Gbps\n");
5395
5396 switch (vsi->back->hw.phy.link_info.link_speed) {
5397 case I40E_LINK_SPEED_40GB:
5398 speed = "40 G";
5399 break;
5400 case I40E_LINK_SPEED_20GB:
5401 speed = "20 G";
5402 break;
5403 case I40E_LINK_SPEED_25GB:
5404 speed = "25 G";
5405 break;
5406 case I40E_LINK_SPEED_10GB:
5407 speed = "10 G";
5408 break;
5409 case I40E_LINK_SPEED_1GB:
5410 speed = "1000 M";
5411 break;
5412 case I40E_LINK_SPEED_100MB:
5413 speed = "100 M";
5414 break;
5415 default:
5416 break;
5417 }
5418
5419 switch (vsi->back->hw.fc.current_mode) {
5420 case I40E_FC_FULL:
5421 fc = "RX/TX";
5422 break;
5423 case I40E_FC_TX_PAUSE:
5424 fc = "TX";
5425 break;
5426 case I40E_FC_RX_PAUSE:
5427 fc = "RX";
5428 break;
5429 default:
5430 fc = "None";
5431 break;
5432 }
5433
5434 if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
5435 req_fec = ", Requested FEC: None";
5436 fec = ", FEC: None";
5437 an = ", Autoneg: False";
5438
5439 if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
5440 an = ", Autoneg: True";
5441
5442 if (vsi->back->hw.phy.link_info.fec_info &
5443 I40E_AQ_CONFIG_FEC_KR_ENA)
5444 fec = ", FEC: CL74 FC-FEC/BASE-R";
5445 else if (vsi->back->hw.phy.link_info.fec_info &
5446 I40E_AQ_CONFIG_FEC_RS_ENA)
5447 fec = ", FEC: CL108 RS-FEC";
5448
5449 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
5450 * both RS and FC are requested
5451 */
5452 if (vsi->back->hw.phy.link_info.req_fec_info &
5453 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
5454 if (vsi->back->hw.phy.link_info.req_fec_info &
5455 I40E_AQ_REQUEST_FEC_RS)
5456 req_fec = ", Requested FEC: CL108 RS-FEC";
5457 else
5458 req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
5459 }
5460 }
5461
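/* For example, a 25G link with RS FEC active, RS FEC requested and
 * autoneg complete would print:
 * "NIC Link is Up, 25 Gbps Full Duplex, Requested FEC: CL108 RS-FEC,
 * FEC: CL108 RS-FEC, Autoneg: True, Flow Control: RX/TX"
 */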
5462 netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
5463 speed, req_fec, fec, an, fc);
5464 }
5465
5466 /**
5467 * i40e_up_complete - Finish the last steps of bringing up a connection
5468 * @vsi: the VSI being configured
5469 **/
5470 static int i40e_up_complete(struct i40e_vsi *vsi)
5471 {
5472 struct i40e_pf *pf = vsi->back;
5473 int err;
5474
5475 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5476 i40e_vsi_configure_msix(vsi);
5477 else
5478 i40e_configure_msi_and_legacy(vsi);
5479
5480 /* start rings */
5481 err = i40e_vsi_start_rings(vsi);
5482 if (err)
5483 return err;
5484
5485 clear_bit(__I40E_VSI_DOWN, vsi->state);
5486 i40e_napi_enable_all(vsi);
5487 i40e_vsi_enable_irq(vsi);
5488
5489 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
5490 (vsi->netdev)) {
5491 i40e_print_link_message(vsi, true);
5492 netif_tx_start_all_queues(vsi->netdev);
5493 netif_carrier_on(vsi->netdev);
5494 } else if (vsi->netdev) {
5495 i40e_print_link_message(vsi, false);
5496 /* need to check for qualified module here */
5497 if ((pf->hw.phy.link_info.link_info &
5498 I40E_AQ_MEDIA_AVAILABLE) &&
5499 (!(pf->hw.phy.link_info.an_info &
5500 I40E_AQ_QUALIFIED_MODULE)))
5501 netdev_err(vsi->netdev,
5502 "the driver failed to link because an unqualified module was detected.");
5503 }
5504
5505 /* replay FDIR SB filters */
5506 if (vsi->type == I40E_VSI_FDIR) {
5507 /* reset fd counters */
5508 pf->fd_add_err = 0;
5509 pf->fd_atr_cnt = 0;
5510 i40e_fdir_filter_restore(vsi);
5511 }
5512
5513 /* On the next run of the service_task, notify any clients of the new
5514 * opened netdev
5515 */
5516 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
5517 i40e_service_event_schedule(pf);
5518
5519 return 0;
5520 }
5521
5522 /**
5523 * i40e_vsi_reinit_locked - Reset the VSI
5524 * @vsi: the VSI being configured
5525 *
5526 * Rebuild the ring structs after some configuration
5527 * has changed, e.g. MTU size.
5528 **/
5529 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5530 {
5531 struct i40e_pf *pf = vsi->back;
5532
5533 WARN_ON(in_interrupt());
5534 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
5535 usleep_range(1000, 2000);
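/* __I40E_CONFIG_BUSY serializes reconfiguration paths; rather than
 * spinning, sleep 1-2 ms per attempt until the current holder is done
 */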
5536 i40e_down(vsi);
5537
5538 i40e_up(vsi);
5539 clear_bit(__I40E_CONFIG_BUSY, pf->state);
5540 }
5541
5542 /**
5543 * i40e_up - Bring the connection back up after being down
5544 * @vsi: the VSI being configured
5545 **/
5546 int i40e_up(struct i40e_vsi *vsi)
5547 {
5548 int err;
5549
5550 err = i40e_vsi_configure(vsi);
5551 if (!err)
5552 err = i40e_up_complete(vsi);
5553
5554 return err;
5555 }
5556
5557 /**
5558 * i40e_down - Shutdown the connection processing
5559 * @vsi: the VSI being stopped
5560 **/
5561 void i40e_down(struct i40e_vsi *vsi)
5562 {
5563 int i;
5564
5565 /* It is assumed that the caller of this function
5566 * sets the vsi->state __I40E_VSI_DOWN bit.
5567 */
5568 if (vsi->netdev) {
5569 netif_carrier_off(vsi->netdev);
5570 netif_tx_disable(vsi->netdev);
5571 }
5572 i40e_vsi_disable_irq(vsi);
5573 i40e_vsi_stop_rings(vsi);
5574 i40e_napi_disable_all(vsi);
5575
5576 for (i = 0; i < vsi->num_queue_pairs; i++) {
5577 i40e_clean_tx_ring(vsi->tx_rings[i]);
5578 if (i40e_enabled_xdp_vsi(vsi))
5579 i40e_clean_tx_ring(vsi->xdp_rings[i]);
5580 i40e_clean_rx_ring(vsi->rx_rings[i]);
5581 }
5583 }
5584
5585 /**
5586 * i40e_setup_tc - configure multiple traffic classes
5587 * @netdev: net device to configure
5588 * @tc: number of traffic classes to enable
5589 **/
5590 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
5591 {
5592 struct i40e_netdev_priv *np = netdev_priv(netdev);
5593 struct i40e_vsi *vsi = np->vsi;
5594 struct i40e_pf *pf = vsi->back;
5595 u8 enabled_tc = 0;
5596 int ret = -EINVAL;
5597 int i;
5598
5599 /* Check if DCB enabled to continue */
5600 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5601 netdev_info(netdev, "DCB is not enabled for adapter\n");
5602 goto exit;
5603 }
5604
5605 /* Check if MFP enabled */
5606 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5607 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
5608 goto exit;
5609 }
5610
5611 /* Check whether tc count is within enabled limit */
5612 if (tc > i40e_pf_get_num_tc(pf)) {
5613 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
5614 goto exit;
5615 }
5616
5617 /* Generate TC map for number of tc requested */
5618 for (i = 0; i < tc; i++)
5619 enabled_tc |= BIT(i);
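/* e.g. tc = 3 yields enabled_tc = 0x7, i.e. TC0, TC1 and TC2 set */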
5620
5621 /* Requesting same TC configuration as already enabled */
5622 if (enabled_tc == vsi->tc_config.enabled_tc)
5623 return 0;
5624
5625 /* Quiesce VSI queues */
5626 i40e_quiesce_vsi(vsi);
5627
5628 /* Configure VSI for enabled TCs */
5629 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5630 if (ret) {
5631 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
5632 vsi->seid);
5633 goto exit;
5634 }
5635
5636 /* Unquiesce VSI */
5637 i40e_unquiesce_vsi(vsi);
5638
5639 exit:
5640 return ret;
5641 }
5642
5643 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
5644 void *type_data)
5645 {
5646 struct tc_mqprio_qopt *mqprio = type_data;
5647
5648 if (type != TC_SETUP_MQPRIO)
5649 return -EOPNOTSUPP;
5650
5651 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
5652
5653 return i40e_setup_tc(netdev, mqprio->num_tc);
5654 }
5655
5656 /**
5657 * i40e_open - Called when a network interface is made active
5658 * @netdev: network interface device structure
5659 *
5660 * The open entry point is called when a network interface is made
5661 * active by the system (IFF_UP). At this point all resources needed
5662 * for transmit and receive operations are allocated, the interrupt
5663 * handler is registered with the OS, the netdev watchdog subtask is
5664 * enabled, and the stack is notified that the interface is ready.
5665 *
5666 * Returns 0 on success, negative value on failure
5667 **/
5668 int i40e_open(struct net_device *netdev)
5669 {
5670 struct i40e_netdev_priv *np = netdev_priv(netdev);
5671 struct i40e_vsi *vsi = np->vsi;
5672 struct i40e_pf *pf = vsi->back;
5673 int err;
5674
5675 /* disallow open during test or if eeprom is broken */
5676 if (test_bit(__I40E_TESTING, pf->state) ||
5677 test_bit(__I40E_BAD_EEPROM, pf->state))
5678 return -EBUSY;
5679
5680 netif_carrier_off(netdev);
5681
5682 err = i40e_vsi_open(vsi);
5683 if (err)
5684 return err;
5685
5686 /* configure global TSO hardware offload settings */
5687 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5688 TCP_FLAG_FIN) >> 16);
5689 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5690 TCP_FLAG_FIN |
5691 TCP_FLAG_CWR) >> 16);
5692 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
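/* These masks appear to control which TCP flags (PSH/FIN/CWR) the
 * hardware carries over to the First, Middle and Last segments of a
 * TSO super-packet; the be32 TCP_FLAG_* constants are shifted down
 * into the low 16 bits the registers expect.
 */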
5693
5694 udp_tunnel_get_rx_info(netdev);
5695
5696 return 0;
5697 }
5698
5699 /**
5700 * i40e_vsi_open - set up resources and bring the VSI up
5701 * @vsi: the VSI to open
5702 *
5703 * Finish initialization of the VSI.
5704 *
5705 * Returns 0 on success, negative value on failure
5706 *
5707 * Note: expects to be called while under rtnl_lock()
5708 **/
5709 int i40e_vsi_open(struct i40e_vsi *vsi)
5710 {
5711 struct i40e_pf *pf = vsi->back;
5712 char int_name[I40E_INT_NAME_STR_LEN];
5713 int err;
5714
5715 /* allocate descriptors */
5716 err = i40e_vsi_setup_tx_resources(vsi);
5717 if (err)
5718 goto err_setup_tx;
5719 err = i40e_vsi_setup_rx_resources(vsi);
5720 if (err)
5721 goto err_setup_rx;
5722
5723 err = i40e_vsi_configure(vsi);
5724 if (err)
5725 goto err_setup_rx;
5726
5727 if (vsi->netdev) {
5728 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5729 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5730 err = i40e_vsi_request_irq(vsi, int_name);
5731 if (err)
5732 goto err_setup_rx;
5733
5734 /* Notify the stack of the actual queue counts. */
5735 err = netif_set_real_num_tx_queues(vsi->netdev,
5736 vsi->num_queue_pairs);
5737 if (err)
5738 goto err_set_queues;
5739
5740 err = netif_set_real_num_rx_queues(vsi->netdev,
5741 vsi->num_queue_pairs);
5742 if (err)
5743 goto err_set_queues;
5744
5745 } else if (vsi->type == I40E_VSI_FDIR) {
5746 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5747 dev_driver_string(&pf->pdev->dev),
5748 dev_name(&pf->pdev->dev));
5749 err = i40e_vsi_request_irq(vsi, int_name);
5750
5751 } else {
5752 err = -EINVAL;
5753 goto err_setup_rx;
5754 }
5755
5756 err = i40e_up_complete(vsi);
5757 if (err)
5758 goto err_up_complete;
5759
5760 return 0;
5761
5762 err_up_complete:
5763 i40e_down(vsi);
5764 err_set_queues:
5765 i40e_vsi_free_irq(vsi);
5766 err_setup_rx:
5767 i40e_vsi_free_rx_resources(vsi);
5768 err_setup_tx:
5769 i40e_vsi_free_tx_resources(vsi);
5770 if (vsi == pf->vsi[pf->lan_vsi])
5771 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
5772
5773 return err;
5774 }
5775
5776 /**
5777 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5778 * @pf: Pointer to PF
5779 *
5780 * This function destroys the hlist where all the Flow Director
5781 * filters were saved.
5782 **/
5783 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5784 {
5785 struct i40e_fdir_filter *filter;
5786 struct i40e_flex_pit *pit_entry, *tmp;
5787 struct hlist_node *node2;
5788
5789 hlist_for_each_entry_safe(filter, node2,
5790 &pf->fdir_filter_list, fdir_node) {
5791 hlist_del(&filter->fdir_node);
5792 kfree(filter);
5793 }
5794
5795 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
5796 list_del(&pit_entry->list);
5797 kfree(pit_entry);
5798 }
5799 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
5800
5801 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
5802 list_del(&pit_entry->list);
5803 kfree(pit_entry);
5804 }
5805 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
5806
5807 pf->fdir_pf_active_filters = 0;
5808 pf->fd_tcp4_filter_cnt = 0;
5809 pf->fd_udp4_filter_cnt = 0;
5810 pf->fd_sctp4_filter_cnt = 0;
5811 pf->fd_ip4_filter_cnt = 0;
5812
5813 /* Reprogram the default input set for TCP/IPv4 */
5814 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
5815 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
5816 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
5817
5818 /* Reprogram the default input set for UDP/IPv4 */
5819 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
5820 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
5821 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
5822
5823 /* Reprogram the default input set for SCTP/IPv4 */
5824 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
5825 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
5826 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
5827
5828 /* Reprogram the default input set for Other/IPv4 */
5829 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
5830 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
5831 }
5832
5833 /**
5834 * i40e_close - Disables a network interface
5835 * @netdev: network interface device structure
5836 *
5837 * The close entry point is called when an interface is de-activated
5838 * by the OS. The hardware is still under the driver's control, but
5839 * this netdev interface is disabled.
5840 *
5841 * Returns 0, this is not allowed to fail
5842 **/
5843 int i40e_close(struct net_device *netdev)
5844 {
5845 struct i40e_netdev_priv *np = netdev_priv(netdev);
5846 struct i40e_vsi *vsi = np->vsi;
5847
5848 i40e_vsi_close(vsi);
5849
5850 return 0;
5851 }
5852
5853 /**
5854 * i40e_do_reset - Start a PF or Core Reset sequence
5855 * @pf: board private structure
5856 * @reset_flags: which reset is requested
5857 * @lock_acquired: indicates whether or not the lock has been acquired
5858 * before this function was called.
5859 *
5860 * The essential difference in resets is that the PF Reset
5861 * doesn't clear the packet buffers, doesn't reset the PE
5862 * firmware, and doesn't bother the other PFs on the chip.
5863 **/
5864 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
5865 {
5866 u32 val;
5867
5868 WARN_ON(in_interrupt());
5869
5871 /* do the biggest reset indicated */
5872 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
5873
5874 /* Request a Global Reset
5875 *
5876 * This will start the chip's countdown to the actual full
5877 * chip reset event, and a warning interrupt to be sent
5878 * to all PFs, including the requestor. Our handler
5879 * for the warning interrupt will deal with the shutdown
5880 * and recovery of the switch setup.
5881 */
5882 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
5883 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5884 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5885 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5886
5887 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
5888
5889 /* Request a Core Reset
5890 *
5891 * Same as Global Reset, except does *not* include the MAC/PHY
5892 */
5893 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
5894 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5895 val |= I40E_GLGEN_RTRIG_CORER_MASK;
5896 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5897 i40e_flush(&pf->hw);
5898
5899 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
5900
5901 /* Request a PF Reset
5902 *
5903 * Resets only the PF-specific registers
5904 *
5905 * This goes directly to the tear-down and rebuild of
5906 * the switch, since we need to do all the recovery as
5907 * for the Core Reset.
5908 */
5909 dev_dbg(&pf->pdev->dev, "PFR requested\n");
5910 i40e_handle_reset_warning(pf, lock_acquired);
5911
5912 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
5913 int v;
5914
5915 /* Find the VSI(s) that requested a re-init */
5916 dev_info(&pf->pdev->dev,
5917 "VSI reinit requested\n");
5918 for (v = 0; v < pf->num_alloc_vsi; v++) {
5919 struct i40e_vsi *vsi = pf->vsi[v];
5920
5921 if (vsi != NULL &&
5922 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
5923 vsi->state))
5924 i40e_vsi_reinit_locked(pf->vsi[v]);
5925 }
5926 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
5927 int v;
5928
5929 /* Find the VSI(s) that needs to be brought down */
5930 dev_info(&pf->pdev->dev, "VSI down requested\n");
5931 for (v = 0; v < pf->num_alloc_vsi; v++) {
5932 struct i40e_vsi *vsi = pf->vsi[v];
5933
5934 if (vsi != NULL &&
5935 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
5936 vsi->state)) {
5937 set_bit(__I40E_VSI_DOWN, vsi->state);
5938 i40e_down(vsi);
5939 }
5940 }
5941 } else {
5942 dev_info(&pf->pdev->dev,
5943 "bad reset request 0x%08x\n", reset_flags);
5944 }
5945 }
5946
5947 #ifdef CONFIG_I40E_DCB
5948 /**
5949 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5950 * @pf: board private structure
5951 * @old_cfg: current DCB config
5952 * @new_cfg: new DCB config
5953 **/
5954 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5955 struct i40e_dcbx_config *old_cfg,
5956 struct i40e_dcbx_config *new_cfg)
5957 {
5958 bool need_reconfig = false;
5959
5960 /* Check if ETS configuration has changed */
5961 if (memcmp(&new_cfg->etscfg,
5962 &old_cfg->etscfg,
5963 sizeof(new_cfg->etscfg))) {
5964 /* If Priority Table has changed reconfig is needed */
5965 if (memcmp(&new_cfg->etscfg.prioritytable,
5966 &old_cfg->etscfg.prioritytable,
5967 sizeof(new_cfg->etscfg.prioritytable))) {
5968 need_reconfig = true;
5969 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5970 }
5971
5972 if (memcmp(&new_cfg->etscfg.tcbwtable,
5973 &old_cfg->etscfg.tcbwtable,
5974 sizeof(new_cfg->etscfg.tcbwtable)))
5975 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5976
5977 if (memcmp(&new_cfg->etscfg.tsatable,
5978 &old_cfg->etscfg.tsatable,
5979 sizeof(new_cfg->etscfg.tsatable)))
5980 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5981 }
5982
5983 /* Check if PFC configuration has changed */
5984 if (memcmp(&new_cfg->pfc,
5985 &old_cfg->pfc,
5986 sizeof(new_cfg->pfc))) {
5987 need_reconfig = true;
5988 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5989 }
5990
5991 /* Check if APP Table has changed */
5992 if (memcmp(&new_cfg->app,
5993 &old_cfg->app,
5994 sizeof(new_cfg->app))) {
5995 need_reconfig = true;
5996 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5997 }
5998
5999 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
6000 return need_reconfig;
6001 }
6002
6003 /**
6004 * i40e_handle_lldp_event - Handle LLDP Change MIB event
6005 * @pf: board private structure
6006 * @e: event info posted on ARQ
6007 **/
6008 static int i40e_handle_lldp_event(struct i40e_pf *pf,
6009 struct i40e_arq_event_info *e)
6010 {
6011 struct i40e_aqc_lldp_get_mib *mib =
6012 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
6013 struct i40e_hw *hw = &pf->hw;
6014 struct i40e_dcbx_config tmp_dcbx_cfg;
6015 bool need_reconfig = false;
6016 int ret = 0;
6017 u8 type;
6018
6019 /* Not DCB capable or capability disabled */
6020 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
6021 return ret;
6022
6023 /* Ignore if event is not for Nearest Bridge */
6024 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
6025 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
6026 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
6027 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
6028 return ret;
6029
6030 /* Check MIB Type and return if event for Remote MIB update */
6031 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
6032 dev_dbg(&pf->pdev->dev,
6033 "LLDP event mib type %s\n", type ? "remote" : "local");
6034 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
6035 /* Update the remote cached instance and return */
6036 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
6037 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
6038 &hw->remote_dcbx_config);
6039 goto exit;
6040 }
6041
6042 /* Store the old configuration */
6043 tmp_dcbx_cfg = hw->local_dcbx_config;
6044
6045 /* Reset the old DCBx configuration data */
6046 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
6047 /* Get updated DCBX data from firmware */
6048 ret = i40e_get_dcb_config(&pf->hw);
6049 if (ret) {
6050 dev_info(&pf->pdev->dev,
6051 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
6052 i40e_stat_str(&pf->hw, ret),
6053 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6054 goto exit;
6055 }
6056
6057 /* No change detected in DCBX configs */
6058 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
6059 sizeof(tmp_dcbx_cfg))) {
6060 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
6061 goto exit;
6062 }
6063
6064 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
6065 &hw->local_dcbx_config);
6066
6067 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
6068
6069 if (!need_reconfig)
6070 goto exit;
6071
6072 /* Enable DCB tagging only when more than one TC */
6073 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6074 pf->flags |= I40E_FLAG_DCB_ENABLED;
6075 else
6076 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6077
6078 set_bit(__I40E_PORT_SUSPENDED, pf->state);
6079 /* Reconfiguration needed quiesce all VSIs */
6080 i40e_pf_quiesce_all_vsi(pf);
6081
6082 /* Changes in configuration update VEB/VSI */
6083 i40e_dcb_reconfigure(pf);
6084
6085 ret = i40e_resume_port_tx(pf);
6086
6087 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
6088 /* In case of error no point in resuming VSIs */
6089 if (ret)
6090 goto exit;
6091
6092 /* Wait for the PF's queues to be disabled */
6093 ret = i40e_pf_wait_queues_disabled(pf);
6094 if (ret) {
6095 /* Schedule PF reset to recover */
6096 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6097 i40e_service_event_schedule(pf);
6098 } else {
6099 i40e_pf_unquiesce_all_vsi(pf);
6100 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
6101 I40E_FLAG_CLIENT_L2_CHANGE);
6102 }
6103
6104 exit:
6105 return ret;
6106 }
6107 #endif /* CONFIG_I40E_DCB */
6108
6109 /**
6110 * i40e_do_reset_safe - Protected reset path for userland calls.
6111 * @pf: board private structure
6112 * @reset_flags: which reset is requested
6114 **/
6115 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
6116 {
6117 rtnl_lock();
6118 i40e_do_reset(pf, reset_flags, true);
6119 rtnl_unlock();
6120 }
6121
6122 /**
6123 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
6124 * @pf: board private structure
6125 * @e: event info posted on ARQ
6126 *
6127 * Handler for LAN Queue Overflow Event generated by the firmware for PF
6128 * and VF queues
6129 **/
6130 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
6131 struct i40e_arq_event_info *e)
6132 {
6133 struct i40e_aqc_lan_overflow *data =
6134 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
6135 u32 queue = le32_to_cpu(data->prtdcb_rupto);
6136 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
6137 struct i40e_hw *hw = &pf->hw;
6138 struct i40e_vf *vf;
6139 u16 vf_id;
6140
6141 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
6142 queue, qtx_ctl);
6143
6144 /* Queue belongs to VF, find the VF and issue VF reset */
6145 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
6146 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
6147 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
6148 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
6149 vf_id -= hw->func_caps.vf_base_id;
6150 vf = &pf->vf[vf_id];
6151 i40e_vc_notify_vf_reset(vf);
6152 /* Allow VF to process pending reset notification */
6153 msleep(20);
6154 i40e_reset_vf(vf, false);
6155 }
6156 }
6157
6158 /**
6159 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
6160 * @pf: board private structure
6161 **/
6162 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
6163 {
6164 u32 val, fcnt_prog;
6165
6166 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
6167 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
6168 return fcnt_prog;
6169 }
6170
6171 /**
6172 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
6173 * @pf: board private structure
6174 **/
6175 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
6176 {
6177 u32 val, fcnt_prog;
6178
6179 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
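/* FDSTAT packs two counters into one register: the guaranteed
 * filter count in the low bits and the best effort count above
 * BEST_CNT_SHIFT; the total programmed is their sum.
 */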
6180 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
6181 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
6182 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
6183 return fcnt_prog;
6184 }
6185
6186 /**
6187 * i40e_get_global_fd_count - Get total FD filters programmed on device
6188 * @pf: board private structure
6189 **/
6190 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
6191 {
6192 u32 val, fcnt_prog;
6193
6194 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
6195 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
6196 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
6197 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
6198 return fcnt_prog;
6199 }
6200
6201 /**
6202 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
6203 * @pf: board private structure
6204 **/
6205 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
6206 {
6207 struct i40e_fdir_filter *filter;
6208 u32 fcnt_prog, fcnt_avail;
6209 struct hlist_node *node;
6210
6211 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
6212 return;
6213
6214 /* Check if we have enough room to re-enable FDir SB capability. */
6215 fcnt_prog = i40e_get_global_fd_count(pf);
6216 fcnt_avail = pf->fdir_pf_filter_count;
6217 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
6218 (pf->fd_add_err == 0) ||
6219 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
6220 if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
6221 pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
6222 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
6223 (I40E_DEBUG_FD & pf->hw.debug_mask))
6224 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
6225 }
6226 }
6227
6228 /* We should wait for even more space before re-enabling ATR.
6229 * Additionally, we cannot enable ATR as long as we still have TCP SB
6230 * rules active.
6231 */
6232 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
6233 (pf->fd_tcp4_filter_cnt == 0)) {
6234 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
6235 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
6236 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
6237 (I40E_DEBUG_FD & pf->hw.debug_mask))
6238 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
6239 }
6240 }
6241
6242 /* if hw had a problem adding a filter, delete it */
6243 if (pf->fd_inv > 0) {
6244 hlist_for_each_entry_safe(filter, node,
6245 &pf->fdir_filter_list, fdir_node) {
6246 if (filter->fd_id == pf->fd_inv) {
6247 hlist_del(&filter->fdir_node);
6248 kfree(filter);
6249 pf->fdir_pf_active_filters--;
6250 }
6251 }
6252 }
6253 }
6254
6255 #define I40E_MIN_FD_FLUSH_INTERVAL 10
6256 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
6257 /**
6258 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
6259 * @pf: board private structure
6260 **/
6261 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
6262 {
6263 unsigned long min_flush_time;
6264 int flush_wait_retry = 50;
6265 bool disable_atr = false;
6266 int fd_room;
6267 int reg;
6268
6269 if (!time_after(jiffies, pf->fd_flush_timestamp +
6270 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
6271 return;
6272
6273 /* If the flush is happening too quickly and we have mostly SB rules we
6274 * should not re-enable ATR for some time.
6275 */
6276 min_flush_time = pf->fd_flush_timestamp +
6277 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
6278 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
6279
6280 if (!(time_after(jiffies, min_flush_time)) &&
6281 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
6282 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6283 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
6284 disable_atr = true;
6285 }
6286
6287 pf->fd_flush_timestamp = jiffies;
6288 pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
6289 /* flush all filters */
6290 wr32(&pf->hw, I40E_PFQF_CTL_1,
6291 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
6292 i40e_flush(&pf->hw);
6293 pf->fd_flush_cnt++;
6294 pf->fd_add_err = 0;
6295 do {
6296 /* Check FD flush status every 5-6msec */
6297 usleep_range(5000, 6000);
6298 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
6299 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
6300 break;
6301 } while (flush_wait_retry--);
6302 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
6303 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
6304 } else {
6305 /* replay sideband filters */
6306 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
6307 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
6308 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
6309 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
6310 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6311 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
6312 }
6313 }
6314
6315 /**
6316 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
6317 * @pf: board private structure
6318 **/
6319 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
6320 {
6321 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
6322 }
6323
6324 /* We can see up to 256 filter programming descriptors in transit if the
6325 * filters are being applied really fast before we see the first filter
6326 * miss error on Rx queue 0. Accumulating enough error messages before
6327 * reacting will make sure we don't cause a flush too often.
6328 */
6329 #define I40E_MAX_FD_PROGRAM_ERROR 256
6330
6331 /**
6332 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
6333 * @pf: board private structure
6334 **/
6335 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
6336 {
6338 /* if interface is down do nothing */
6339 if (test_bit(__I40E_DOWN, pf->state))
6340 return;
6341
6342 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
6343 i40e_fdir_flush_and_replay(pf);
6344
6345 i40e_fdir_check_and_reenable(pf);
6347 }
6348
6349 /**
6350 * i40e_vsi_link_event - notify VSI of a link event
6351 * @vsi: vsi to be notified
6352 * @link_up: link up or down
6353 **/
6354 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
6355 {
6356 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
6357 return;
6358
6359 switch (vsi->type) {
6360 case I40E_VSI_MAIN:
6361 if (!vsi->netdev || !vsi->netdev_registered)
6362 break;
6363
6364 if (link_up) {
6365 netif_carrier_on(vsi->netdev);
6366 netif_tx_wake_all_queues(vsi->netdev);
6367 } else {
6368 netif_carrier_off(vsi->netdev);
6369 netif_tx_stop_all_queues(vsi->netdev);
6370 }
6371 break;
6372
6373 case I40E_VSI_SRIOV:
6374 case I40E_VSI_VMDQ2:
6375 case I40E_VSI_CTRL:
6376 case I40E_VSI_IWARP:
6377 case I40E_VSI_MIRROR:
6378 default:
6379 /* there is no notification for other VSIs */
6380 break;
6381 }
6382 }
6383
6384 /**
6385 * i40e_veb_link_event - notify elements on the veb of a link event
6386 * @veb: veb to be notified
6387 * @link_up: link up or down
6388 **/
6389 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6390 {
6391 struct i40e_pf *pf;
6392 int i;
6393
6394 if (!veb || !veb->pf)
6395 return;
6396 pf = veb->pf;
6397
6398 /* depth first... */
6399 for (i = 0; i < I40E_MAX_VEB; i++)
6400 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6401 i40e_veb_link_event(pf->veb[i], link_up);
6402
6403 /* ... now the local VSIs */
6404 for (i = 0; i < pf->num_alloc_vsi; i++)
6405 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6406 i40e_vsi_link_event(pf->vsi[i], link_up);
6407 }
6408
6409 /**
6410 * i40e_link_event - Update netif_carrier status
6411 * @pf: board private structure
6412 **/
6413 static void i40e_link_event(struct i40e_pf *pf)
6414 {
6415 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6416 u8 new_link_speed, old_link_speed;
6417 i40e_status status;
6418 bool new_link, old_link;
6419
6420 /* save off old link status information */
6421 pf->hw.phy.link_info_old = pf->hw.phy.link_info;
6422
6423 /* set this to force the get_link_status call to refresh state */
6424 pf->hw.phy.get_link_info = true;
6425
6426 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
6427
6428 status = i40e_get_link_status(&pf->hw, &new_link);
6429
6430 /* On success, disable temp link polling */
6431 if (status == I40E_SUCCESS) {
6432 if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
6433 pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
6434 } else {
6435 /* Enable link polling temporarily until i40e_get_link_status
6436 * returns I40E_SUCCESS
6437 */
6438 pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
6439 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
6440 status);
6441 return;
6442 }
6443
6444 old_link_speed = pf->hw.phy.link_info_old.link_speed;
6445 new_link_speed = pf->hw.phy.link_info.link_speed;
6446
6447 if (new_link == old_link &&
6448 new_link_speed == old_link_speed &&
6449 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
6450 new_link == netif_carrier_ok(vsi->netdev)))
6451 return;
6452
6453 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
6454 i40e_print_link_message(vsi, new_link);
6455
6456 /* Notify the base of the switch tree connected to
6457 * the link. Floating VEBs are not notified.
6458 */
6459 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6460 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
6461 else
6462 i40e_vsi_link_event(vsi, new_link);
6463
6464 if (pf->vf)
6465 i40e_vc_notify_link_state(pf);
6466
6467 if (pf->flags & I40E_FLAG_PTP)
6468 i40e_ptp_set_increment(pf);
6469 }
6470
6471 /**
6472 * i40e_watchdog_subtask - periodic checks not using event driven response
6473 * @pf: board private structure
6474 **/
6475 static void i40e_watchdog_subtask(struct i40e_pf *pf)
6476 {
6477 int i;
6478
6479 /* if interface is down do nothing */
6480 if (test_bit(__I40E_DOWN, pf->state) ||
6481 test_bit(__I40E_CONFIG_BUSY, pf->state))
6482 return;
6483
6484 /* make sure we don't do these things too often */
6485 if (time_before(jiffies, (pf->service_timer_previous +
6486 pf->service_timer_period)))
6487 return;
6488 pf->service_timer_previous = jiffies;
6489
6490 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
6491 (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
6492 i40e_link_event(pf);
6493
6494 /* Update the stats for active netdevs so the network stack
6495 * can look at updated numbers whenever it cares to
6496 */
6497 for (i = 0; i < pf->num_alloc_vsi; i++)
6498 if (pf->vsi[i] && pf->vsi[i]->netdev)
6499 i40e_update_stats(pf->vsi[i]);
6500
6501 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
6502 /* Update the stats for the active switching components */
6503 for (i = 0; i < I40E_MAX_VEB; i++)
6504 if (pf->veb[i])
6505 i40e_update_veb_stats(pf->veb[i]);
6506 }
6507
6508 i40e_ptp_rx_hang(pf);
6509 i40e_ptp_tx_hang(pf);
6510 }
6511
6512 /**
6513 * i40e_reset_subtask - Set up for resetting the device and driver
6514 * @pf: board private structure
6515 **/
6516 static void i40e_reset_subtask(struct i40e_pf *pf)
6517 {
6518 u32 reset_flags = 0;
6519
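/* All pending request bits are collected into reset_flags (and
 * cleared) up front so that a single call to i40e_do_reset() below
 * can act on the biggest reset indicated.
 */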
6520 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
6521 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
6522 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
6523 }
6524 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
6525 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
6526 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6527 }
6528 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
6529 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
6530 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
6531 }
6532 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
6533 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
6534 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
6535 }
6536 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
6537 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
6538 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
6539 }
6540
6541 /* If there's a recovery already waiting, it takes
6542 * precedence over starting a new reset sequence.
6543 */
6544 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
6545 i40e_prep_for_reset(pf, false);
6546 i40e_reset(pf);
6547 i40e_rebuild(pf, false, false);
6548 }
6549
6550 /* If we're already down or resetting, just bail */
6551 if (reset_flags &&
6552 !test_bit(__I40E_DOWN, pf->state) &&
6553 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
6554 i40e_do_reset(pf, reset_flags, false);
6555 }
6556 }
6557
6558 /**
6559 * i40e_handle_link_event - Handle link event
6560 * @pf: board private structure
6561 * @e: event info posted on ARQ
6562 **/
6563 static void i40e_handle_link_event(struct i40e_pf *pf,
6564 struct i40e_arq_event_info *e)
6565 {
6566 struct i40e_aqc_get_link_status *status =
6567 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
6568
6569 /* Do a new status request to re-enable LSE reporting
6570 * and load new status information into the hw struct
6571 * This completely ignores any state information
6572 * in the ARQ event info, instead choosing to always
6573 * issue the AQ update link status command.
6574 */
6575 i40e_link_event(pf);
6576
6577 /* check for unqualified module, if link is down */
6578 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6579 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6580 (!(status->link_info & I40E_AQ_LINK_UP)))
6581 dev_err(&pf->pdev->dev,
6582 "The driver failed to link because an unqualified module was detected.\n");
6583 }
6584
6585 /**
6586 * i40e_clean_adminq_subtask - Clean the AdminQ rings
6587 * @pf: board private structure
6588 **/
6589 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
6590 {
6591 struct i40e_arq_event_info event;
6592 struct i40e_hw *hw = &pf->hw;
6593 u16 pending, i = 0;
6594 i40e_status ret;
6595 u16 opcode;
6596 u32 oldval;
6597 u32 val;
6598
6599 /* Do not run clean AQ when PF reset fails */
6600 if (test_bit(__I40E_RESET_FAILED, pf->state))
6601 return;
6602
6603 /* check for error indications */
6604 val = rd32(&pf->hw, pf->hw.aq.arq.len);
6605 oldval = val;
6606 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
6607 if (hw->debug_mask & I40E_DEBUG_AQ)
6608 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
6609 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
6610 }
6611 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
6612 if (hw->debug_mask & I40E_DEBUG_AQ)
6613 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
6614 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
6615 pf->arq_overflows++;
6616 }
6617 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
6618 if (hw->debug_mask & I40E_DEBUG_AQ)
6619 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
6620 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
6621 }
6622 if (oldval != val)
6623 wr32(&pf->hw, pf->hw.aq.arq.len, val);
6624
6625 val = rd32(&pf->hw, pf->hw.aq.asq.len);
6626 oldval = val;
6627 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
6628 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6629 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
6630 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
6631 }
6632 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
6633 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6634 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
6635 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
6636 }
6637 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
6638 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6639 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
6640 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
6641 }
6642 if (oldval != val)
6643 wr32(&pf->hw, pf->hw.aq.asq.len, val);
6644
6645 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
6646 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
6647 if (!event.msg_buf)
6648 return;
6649
6650 do {
6651 ret = i40e_clean_arq_element(hw, &event, &pending);
6652 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
6653 break;
6654 else if (ret) {
6655 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6656 break;
6657 }
6658
6659 opcode = le16_to_cpu(event.desc.opcode);
6660 switch (opcode) {
6662 case i40e_aqc_opc_get_link_status:
6663 i40e_handle_link_event(pf, &event);
6664 break;
6665 case i40e_aqc_opc_send_msg_to_pf:
6666 ret = i40e_vc_process_vf_msg(pf,
6667 le16_to_cpu(event.desc.retval),
6668 le32_to_cpu(event.desc.cookie_high),
6669 le32_to_cpu(event.desc.cookie_low),
6670 event.msg_buf,
6671 event.msg_len);
6672 break;
6673 case i40e_aqc_opc_lldp_update_mib:
6674 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
6675 #ifdef CONFIG_I40E_DCB
6676 rtnl_lock();
6677 ret = i40e_handle_lldp_event(pf, &event);
6678 rtnl_unlock();
6679 #endif /* CONFIG_I40E_DCB */
6680 break;
6681 case i40e_aqc_opc_event_lan_overflow:
6682 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
6683 i40e_handle_lan_overflow_event(pf, &event);
6684 break;
6685 case i40e_aqc_opc_send_msg_to_peer:
6686 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6687 break;
6688 case i40e_aqc_opc_nvm_erase:
6689 case i40e_aqc_opc_nvm_update:
6690 case i40e_aqc_opc_oem_post_update:
6691 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
6692 "ARQ NVM operation 0x%04x completed\n",
6693 opcode);
6694 break;
6695 default:
6696 dev_info(&pf->pdev->dev,
6697 "ARQ: Unknown event 0x%04x ignored\n",
6698 opcode);
6699 break;
6700 }
6701 } while (i++ < pf->adminq_work_limit);
6702
6703 if (i < pf->adminq_work_limit)
6704 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
6705
6706 /* re-enable Admin queue interrupt cause */
6707 val = rd32(hw, I40E_PFINT_ICR0_ENA);
6708 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6709 wr32(hw, I40E_PFINT_ICR0_ENA, val);
6710 i40e_flush(hw);
6711
6712 kfree(event.msg_buf);
6713 }
6714
6715 /**
6716 * i40e_verify_eeprom - make sure eeprom is good to use
6717 * @pf: board private structure
6718 **/
6719 static void i40e_verify_eeprom(struct i40e_pf *pf)
6720 {
6721 int err;
6722
6723 err = i40e_diag_eeprom_test(&pf->hw);
6724 if (err) {
6725 /* retry in case of garbage read */
6726 err = i40e_diag_eeprom_test(&pf->hw);
6727 if (err) {
6728 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6729 err);
6730 set_bit(__I40E_BAD_EEPROM, pf->state);
6731 }
6732 }
6733
6734 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
6735 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6736 clear_bit(__I40E_BAD_EEPROM, pf->state);
6737 }
6738 }
6739
6740 /**
6741 * i40e_enable_pf_switch_lb
6742 * @pf: pointer to the PF structure
6743 *
6744 * enable switch loop back or die - no point in a return value
6745 **/
6746 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6747 {
6748 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6749 struct i40e_vsi_context ctxt;
6750 int ret;
6751
6752 ctxt.seid = pf->main_vsi_seid;
6753 ctxt.pf_num = pf->hw.pf_id;
6754 ctxt.vf_num = 0;
6755 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6756 if (ret) {
6757 dev_info(&pf->pdev->dev,
6758 "couldn't get PF vsi config, err %s aq_err %s\n",
6759 i40e_stat_str(&pf->hw, ret),
6760 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6761 return;
6762 }
6763 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6764 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6765 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6766
6767 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6768 if (ret) {
6769 dev_info(&pf->pdev->dev,
6770 "update vsi switch failed, err %s aq_err %s\n",
6771 i40e_stat_str(&pf->hw, ret),
6772 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6773 }
6774 }
6775
6776 /**
6777 * i40e_disable_pf_switch_lb
6778 * @pf: pointer to the PF structure
6779 *
6780 * disable switch loop back or die - no point in a return value
6781 **/
6782 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6783 {
6784 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6785 struct i40e_vsi_context ctxt;
6786 int ret;
6787
6788 ctxt.seid = pf->main_vsi_seid;
6789 ctxt.pf_num = pf->hw.pf_id;
6790 ctxt.vf_num = 0;
6791 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6792 if (ret) {
6793 dev_info(&pf->pdev->dev,
6794 "couldn't get PF vsi config, err %s aq_err %s\n",
6795 i40e_stat_str(&pf->hw, ret),
6796 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6797 return;
6798 }
6799 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6800 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6801 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6802
6803 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6804 if (ret) {
6805 dev_info(&pf->pdev->dev,
6806 "update vsi switch failed, err %s aq_err %s\n",
6807 i40e_stat_str(&pf->hw, ret),
6808 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6809 }
6810 }
6811
6812 /**
6813 * i40e_config_bridge_mode - Configure the HW bridge mode
6814 * @veb: pointer to the bridge instance
6815 *
6816 * Configure the loop back mode for the LAN VSI that is downlink to the
6817 * specified HW bridge instance. It is expected this function is called
6818 * when a new HW bridge is instantiated.
6819 **/
6820 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6821 {
6822 struct i40e_pf *pf = veb->pf;
6823
6824 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6825 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6826 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6827 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6828 i40e_disable_pf_switch_lb(pf);
6829 else
6830 i40e_enable_pf_switch_lb(pf);
6831 }
6832
6833 /**
6834 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6835 * @veb: pointer to the VEB instance
6836 *
6837 * This is a recursive function that first builds the attached VSIs then
6838 * recurses in to build the next layer of VEB. We track the connections
6839 * through our own index numbers because the seid's from the HW could
6840 * change across the reset.
6841 **/
6842 static int i40e_reconstitute_veb(struct i40e_veb *veb)
6843 {
6844 struct i40e_vsi *ctl_vsi = NULL;
6845 struct i40e_pf *pf = veb->pf;
6846 int v, veb_idx;
6847 int ret;
6848
6849 /* build VSI that owns this VEB, temporarily attached to base VEB */
6850 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6851 if (pf->vsi[v] &&
6852 pf->vsi[v]->veb_idx == veb->idx &&
6853 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6854 ctl_vsi = pf->vsi[v];
6855 break;
6856 }
6857 }
6858 if (!ctl_vsi) {
6859 dev_info(&pf->pdev->dev,
6860 "missing owner VSI for veb_idx %d\n", veb->idx);
6861 ret = -ENOENT;
6862 goto end_reconstitute;
6863 }
6864 if (ctl_vsi != pf->vsi[pf->lan_vsi])
6865 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6866 ret = i40e_add_vsi(ctl_vsi);
6867 if (ret) {
6868 dev_info(&pf->pdev->dev,
6869 "rebuild of veb_idx %d owner VSI failed: %d\n",
6870 veb->idx, ret);
6871 goto end_reconstitute;
6872 }
6873 i40e_vsi_reset_stats(ctl_vsi);
6874
6875 /* create the VEB in the switch and move the VSI onto the VEB */
6876 ret = i40e_add_veb(veb, ctl_vsi);
6877 if (ret)
6878 goto end_reconstitute;
6879
6880 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6881 veb->bridge_mode = BRIDGE_MODE_VEB;
6882 else
6883 veb->bridge_mode = BRIDGE_MODE_VEPA;
6884 i40e_config_bridge_mode(veb);
6885
6886 /* create the remaining VSIs attached to this VEB */
6887 for (v = 0; v < pf->num_alloc_vsi; v++) {
6888 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6889 continue;
6890
6891 if (pf->vsi[v]->veb_idx == veb->idx) {
6892 struct i40e_vsi *vsi = pf->vsi[v];
6893
6894 vsi->uplink_seid = veb->seid;
6895 ret = i40e_add_vsi(vsi);
6896 if (ret) {
6897 dev_info(&pf->pdev->dev,
6898 "rebuild of vsi_idx %d failed: %d\n",
6899 v, ret);
6900 goto end_reconstitute;
6901 }
6902 i40e_vsi_reset_stats(vsi);
6903 }
6904 }
6905
6906 /* create any VEBs attached to this VEB - RECURSION */
6907 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6908 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6909 pf->veb[veb_idx]->uplink_seid = veb->seid;
6910 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6911 if (ret)
6912 break;
6913 }
6914 }
6915
6916 end_reconstitute:
6917 return ret;
6918 }
6919
6920 /**
6921 * i40e_get_capabilities - get info about the HW
6922 * @pf: the PF struct
6923 **/
6924 static int i40e_get_capabilities(struct i40e_pf *pf)
6925 {
6926 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6927 u16 data_size;
6928 int buf_len;
6929 int err;
6930
6931 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6932 do {
6933 cap_buf = kzalloc(buf_len, GFP_KERNEL);
6934 if (!cap_buf)
6935 return -ENOMEM;
6936
6937 /* this loads the data into the hw struct for us */
6938 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6939 &data_size,
6940 i40e_aqc_opc_list_func_capabilities,
6941 NULL);
6942 /* data loaded, buffer no longer needed */
6943 kfree(cap_buf);
6944
6945 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6946 /* retry with a larger buffer */
6947 buf_len = data_size;
6948 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6949 dev_info(&pf->pdev->dev,
6950 "capability discovery failed, err %s aq_err %s\n",
6951 i40e_stat_str(&pf->hw, err),
6952 i40e_aq_str(&pf->hw,
6953 pf->hw.aq.asq_last_status));
6954 return -ENODEV;
6955 }
6956 } while (err);
6957
6958 if (pf->hw.debug_mask & I40E_DEBUG_USER)
6959 dev_info(&pf->pdev->dev,
6960 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6961 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6962 pf->hw.func_caps.num_msix_vectors,
6963 pf->hw.func_caps.num_msix_vectors_vf,
6964 pf->hw.func_caps.fd_filters_guaranteed,
6965 pf->hw.func_caps.fd_filters_best_effort,
6966 pf->hw.func_caps.num_tx_qp,
6967 pf->hw.func_caps.num_vsis);
6968
6969 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6970 + pf->hw.func_caps.num_vfs)
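/* e.g. with no FCoE and 32 VFs: DEF_NUM_VSI = 1 + 0 + 32 = 33 */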
6971 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6972 dev_info(&pf->pdev->dev,
6973 "got num_vsis %d, setting num_vsis to %d\n",
6974 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6975 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6976 }
6977
6978 return 0;
6979 }
6980
6981 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6982
6983 /**
6984 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6985 * @pf: board private structure
6986 **/
6987 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6988 {
6989 struct i40e_vsi *vsi;
6990
6991 /* quick workaround for an NVM issue that leaves a critical register
6992 * uninitialized
6993 */
6994 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6995 static const u32 hkey[] = {
6996 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6997 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6998 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6999 0x95b3a76d};
7000 int i;
7001
7002 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
7003 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
7004 }
7005
7006 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
7007 return;
7008
7009 /* find existing VSI and see if it needs configuring */
7010 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
7011
7012 /* create a new VSI if none exists */
7013 if (!vsi) {
7014 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
7015 pf->vsi[pf->lan_vsi]->seid, 0);
7016 if (!vsi) {
7017 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
7018 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7019 return;
7020 }
7021 }
7022
7023 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
7024 }
7025
7026 /**
7027 * i40e_fdir_teardown - release the Flow Director resources
7028 * @pf: board private structure
7029 **/
7030 static void i40e_fdir_teardown(struct i40e_pf *pf)
7031 {
7032 struct i40e_vsi *vsi;
7033
7034 i40e_fdir_filter_exit(pf);
7035 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
7036 if (vsi)
7037 i40e_vsi_release(vsi);
7038 }
7039
7040 /**
7041 * i40e_prep_for_reset - prep for the core to reset
7042 * @pf: board private structure
7043 * @lock_acquired: indicates whether or not the lock has been acquired
7044 * before this function was called.
7045 *
7046 * Close up the VFs and other things in prep for PF Reset.
7047 **/
7048 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
7049 {
7050 struct i40e_hw *hw = &pf->hw;
7051 i40e_status ret = 0;
7052 u32 v;
7053
7054 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
7055 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
7056 return;
7057 if (i40e_check_asq_alive(&pf->hw))
7058 i40e_vc_notify_reset(pf);
7059
7060 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
7061
7062 /* quiesce the VSIs and their queues that are not already DOWN */
7063 /* pf_quiesce_all_vsi modifies netdev structures - rtnl_lock needed */
7064 if (!lock_acquired)
7065 rtnl_lock();
7066 i40e_pf_quiesce_all_vsi(pf);
7067 if (!lock_acquired)
7068 rtnl_unlock();
7069
7070 for (v = 0; v < pf->num_alloc_vsi; v++) {
7071 if (pf->vsi[v])
7072 pf->vsi[v]->seid = 0;
7073 }
7074
7075 i40e_shutdown_adminq(&pf->hw);
7076
7077 /* call shutdown HMC */
7078 if (hw->hmc.hmc_obj) {
7079 ret = i40e_shutdown_lan_hmc(hw);
7080 if (ret)
7081 dev_warn(&pf->pdev->dev,
7082 "shutdown_lan_hmc failed: %d\n", ret);
7083 }
7084 }
7085
7086 /**
7087 * i40e_send_version - update firmware with driver version
7088 * @pf: PF struct
7089 */
7090 static void i40e_send_version(struct i40e_pf *pf)
7091 {
7092 struct i40e_driver_version dv;
7093
7094 dv.major_version = DRV_VERSION_MAJOR;
7095 dv.minor_version = DRV_VERSION_MINOR;
7096 dv.build_version = DRV_VERSION_BUILD;
7097 dv.subbuild_version = 0;
7098 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
7099 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
7100 }
7101
7102 /**
7103 * i40e_get_oem_version - get OEM specific version information
7104 * @hw: pointer to the hardware structure
7105 **/
7106 static void i40e_get_oem_version(struct i40e_hw *hw)
7107 {
7108 u16 block_offset = 0xffff;
7109 u16 block_length = 0;
7110 u16 capabilities = 0;
7111 u16 gen_snap = 0;
7112 u16 release = 0;
7113
7114 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
7115 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
7116 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
7117 #define I40E_NVM_OEM_GEN_OFFSET 0x02
7118 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
7119 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
7120 #define I40E_NVM_OEM_LENGTH 3
7121
7122 /* Check if pointer to OEM version block is valid. */
7123 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
7124 if (block_offset == 0xffff)
7125 return;
7126
7127 /* Check if OEM version block has correct length. */
7128 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
7129 &block_length);
7130 if (block_length < I40E_NVM_OEM_LENGTH)
7131 return;
7132
7133 /* Check if OEM version format is as expected. */
7134 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
7135 &capabilities);
7136 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
7137 return;
7138
7139 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
7140 &gen_snap);
7141 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
7142 &release);
7143 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
7144 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
7145 }
7146
7147 /**
7148 * i40e_reset - wait for core reset to finish, reset the PF if CoreR was not seen
7149 * @pf: board private structure
7150 **/
7151 static int i40e_reset(struct i40e_pf *pf)
7152 {
7153 struct i40e_hw *hw = &pf->hw;
7154 i40e_status ret;
7155
7156 ret = i40e_pf_reset(hw);
7157 if (ret) {
7158 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
7159 set_bit(__I40E_RESET_FAILED, pf->state);
7160 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
7161 } else {
7162 pf->pfr_count++;
7163 }
7164 return ret;
7165 }
7166
7167 /**
7168 * i40e_rebuild - rebuild using a saved config
7169 * @pf: board private structure
7170 * @reinit: if the Main VSI needs to be re-initialized.
7171 * @lock_acquired: indicates whether or not the lock has been acquired
7172 * before this function was called.
7173 **/
7174 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
7175 {
7176 struct i40e_hw *hw = &pf->hw;
7177 u8 set_fc_aq_fail = 0;
7178 i40e_status ret;
7179 u32 val;
7180 int v;
7181
7182 if (test_bit(__I40E_DOWN, pf->state))
7183 goto clear_recovery;
7184 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
7185
7186 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
7187 ret = i40e_init_adminq(&pf->hw);
7188 if (ret) {
7189 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
7190 i40e_stat_str(&pf->hw, ret),
7191 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7192 goto clear_recovery;
7193 }
7194 i40e_get_oem_version(&pf->hw);
7195
7196 /* re-verify the eeprom if we just had an EMP reset */
7197 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
7198 i40e_verify_eeprom(pf);
7199
7200 i40e_clear_pxe_mode(hw);
7201 ret = i40e_get_capabilities(pf);
7202 if (ret)
7203 goto end_core_reset;
7204
7205 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
7206 hw->func_caps.num_rx_qp, 0, 0);
7207 if (ret) {
7208 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
7209 goto end_core_reset;
7210 }
7211 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
7212 if (ret) {
7213 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
7214 goto end_core_reset;
7215 }
7216
7217 #ifdef CONFIG_I40E_DCB
7218 ret = i40e_init_pf_dcb(pf);
7219 if (ret) {
7220 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
7221 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
7222 /* Continue without DCB enabled */
7223 }
7224 #endif /* CONFIG_I40E_DCB */
7225 /* do basic switch setup */
7226 if (!lock_acquired)
7227 rtnl_lock();
7228 ret = i40e_setup_pf_switch(pf, reinit);
7229 if (ret)
7230 goto end_unlock;
7231
7232 /* The driver only wants link up/down and module qualification
7233 * reports from firmware. Note the negative logic.
7234 */
7235 ret = i40e_aq_set_phy_int_mask(&pf->hw,
7236 ~(I40E_AQ_EVENT_LINK_UPDOWN |
7237 I40E_AQ_EVENT_MEDIA_NA |
7238 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
7239 if (ret)
7240 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
7241 i40e_stat_str(&pf->hw, ret),
7242 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7243
7244 /* make sure our flow control settings are restored */
7245 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
7246 if (ret)
7247 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
7248 i40e_stat_str(&pf->hw, ret),
7249 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7250
7251 /* Rebuild the VSIs and VEBs that existed before reset.
7252 * They are still in our local switch element arrays, so only
7253 * need to rebuild the switch model in the HW.
7254 *
7255 * If there were VEBs but the reconstitution failed, we'll try
7256 * to recover minimal use by getting the basic PF VSI working.
7257 */
7258 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
7259 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
7260 /* find the one VEB connected to the MAC, and find orphans */
7261 for (v = 0; v < I40E_MAX_VEB; v++) {
7262 if (!pf->veb[v])
7263 continue;
7264
7265 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
7266 pf->veb[v]->uplink_seid == 0) {
7267 ret = i40e_reconstitute_veb(pf->veb[v]);
7268
7269 if (!ret)
7270 continue;
7271
7272 /* If Main VEB failed, we're in deep doodoo,
7273 * so give up rebuilding the switch and set up
7274 * for minimal rebuild of PF VSI.
7275 * If orphan failed, we'll report the error
7276 * but try to keep going.
7277 */
7278 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
7279 dev_info(&pf->pdev->dev,
7280 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
7281 ret);
7282 pf->vsi[pf->lan_vsi]->uplink_seid
7283 = pf->mac_seid;
7284 break;
7285 } else if (pf->veb[v]->uplink_seid == 0) {
7286 dev_info(&pf->pdev->dev,
7287 "rebuild of orphan VEB failed: %d\n",
7288 ret);
7289 }
7290 }
7291 }
7292 }
7293
7294 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
7295 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
7296 /* no VEB, so rebuild only the Main VSI */
7297 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
7298 if (ret) {
7299 dev_info(&pf->pdev->dev,
7300 "rebuild of Main VSI failed: %d\n", ret);
7301 goto end_unlock;
7302 }
7303 }
7304
7305 /* Reconfigure hardware for allowing smaller MSS in the case
7306 * of TSO, so that we avoid the MDD being fired and causing
7307 * a reset in the case of small MSS+TSO.
7308 */
7309 #define I40E_REG_MSS 0x000E64DC
7310 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
7311 #define I40E_64BYTE_MSS 0x400000
7312 val = rd32(hw, I40E_REG_MSS);
7313 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
7314 val &= ~I40E_REG_MSS_MIN_MASK;
7315 val |= I40E_64BYTE_MSS;
7316 wr32(hw, I40E_REG_MSS, val);
7317 }
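/* Worked example: the MSS_MIN field occupies bits 16-25 (mask
 * 0x3FF0000), and I40E_64BYTE_MSS is 0x40 << 16. If the field reads
 * 0x100 (256), it exceeds the 0x40 floor, so the read-modify-write
 * above clears the field and sets it to 0x40 while leaving the rest
 * of the register untouched.
 */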
7318
7319 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
7320 msleep(75);
7321 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
7322 if (ret)
7323 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
7324 i40e_stat_str(&pf->hw, ret),
7325 i40e_aq_str(&pf->hw,
7326 pf->hw.aq.asq_last_status));
7327 }
7328 /* reinit the misc interrupt */
7329 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7330 ret = i40e_setup_misc_vector(pf);
7331
7332 /* Add a filter to drop all Flow control frames from any VSI from being
7333 * transmitted. By doing so we stop a malicious VF from sending out
7334 * PAUSE or PFC frames and potentially controlling traffic for other
7335 * PF/VF VSIs.
7336 * The FW can still send Flow control frames if enabled.
7337 */
7338 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
7339 pf->main_vsi_seid);
7340
7341 /* restart the VSIs that were rebuilt and running before the reset */
7342 i40e_pf_unquiesce_all_vsi(pf);
7343
7344 /* Release the RTNL lock before we start resetting VFs */
7345 if (!lock_acquired)
7346 rtnl_unlock();
7347
7348 i40e_reset_all_vfs(pf, true);
7349
7350 /* tell the firmware that we're starting */
7351 i40e_send_version(pf);
7352
7353 /* We've already released the lock, so don't do it again */
7354 goto end_core_reset;
7355
7356 end_unlock:
7357 if (!lock_acquired)
7358 rtnl_unlock();
7359 end_core_reset:
7360 clear_bit(__I40E_RESET_FAILED, pf->state);
7361 clear_recovery:
7362 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
7363 }
7364
7365 /**
7366 * i40e_reset_and_rebuild - reset and rebuild using a saved config
7367 * @pf: board private structure
7368 * @reinit: if the Main VSI needs to be re-initialized.
7369 * @lock_acquired: indicates whether or not the lock has been acquired
7370 * before this function was called.
7371 **/
7372 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
7373 bool lock_acquired)
7374 {
7375 int ret;
7376 /* Now we wait for GRST to settle out.
7377 * We don't have to delete the VEBs or VSIs from the hw switch
7378 * because the reset will make them disappear.
7379 */
7380 ret = i40e_reset(pf);
7381 if (!ret)
7382 i40e_rebuild(pf, reinit, lock_acquired);
7383 }
7384
7385 /**
7386 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
7387 * @pf: board private structure
7388 * @lock_acquired: indicates whether or not the lock has been acquired
7389 * before this function was called.
7390 *
7391 * Close up the VFs and other things in prep for a Core Reset,
7392 * then get ready to rebuild the world.
7393 **/
7394 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
7395 {
7396 i40e_prep_for_reset(pf, lock_acquired);
7397 i40e_reset_and_rebuild(pf, false, lock_acquired);
7398 }
7399
7400 /**
7401 * i40e_handle_mdd_event - handle a Malicious Driver Detection event
7402 * @pf: pointer to the PF structure
7403 *
7404 * Called from the service task to identify possibly malicious VFs
7405 **/
7406 static void i40e_handle_mdd_event(struct i40e_pf *pf)
7407 {
7408 struct i40e_hw *hw = &pf->hw;
7409 bool mdd_detected = false;
7410 bool pf_mdd_detected = false;
7411 struct i40e_vf *vf;
7412 u32 reg;
7413 int i;
7414
7415 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
7416 return;
7417
7418 /* find what triggered the MDD event */
7419 reg = rd32(hw, I40E_GL_MDET_TX);
7420 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
7421 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
7422 I40E_GL_MDET_TX_PF_NUM_SHIFT;
7423 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
7424 I40E_GL_MDET_TX_VF_NUM_SHIFT;
7425 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
7426 I40E_GL_MDET_TX_EVENT_SHIFT;
7427 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
7428 I40E_GL_MDET_TX_QUEUE_SHIFT) -
7429 pf->hw.func_caps.base_queue;
7430 if (netif_msg_tx_err(pf))
7431 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
7432 event, queue, pf_num, vf_num);
7433 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
7434 mdd_detected = true;
7435 }
7436 reg = rd32(hw, I40E_GL_MDET_RX);
7437 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
7438 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
7439 I40E_GL_MDET_RX_FUNCTION_SHIFT;
7440 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
7441 I40E_GL_MDET_RX_EVENT_SHIFT;
7442 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
7443 I40E_GL_MDET_RX_QUEUE_SHIFT) -
7444 pf->hw.func_caps.base_queue;
7445 if (netif_msg_rx_err(pf))
7446 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
7447 event, queue, func);
7448 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
7449 mdd_detected = true;
7450 }
7451
7452 if (mdd_detected) {
7453 reg = rd32(hw, I40E_PF_MDET_TX);
7454 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
7455 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
7456 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
7457 pf_mdd_detected = true;
7458 }
7459 reg = rd32(hw, I40E_PF_MDET_RX);
7460 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
7461 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
7462 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
7463 pf_mdd_detected = true;
7464 }
7465 /* Queue belongs to the PF, initiate a reset */
7466 if (pf_mdd_detected) {
7467 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
7468 i40e_service_event_schedule(pf);
7469 }
7470 }
7471
7472 /* see if one of the VFs needs its hand slapped */
7473 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
7474 vf = &(pf->vf[i]);
7475 reg = rd32(hw, I40E_VP_MDET_TX(i));
7476 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
7477 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
7478 vf->num_mdd_events++;
7479 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
7480 i);
7481 }
7482
7483 reg = rd32(hw, I40E_VP_MDET_RX(i));
7484 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
7485 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
7486 vf->num_mdd_events++;
7487 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
7488 i);
7489 }
7490
7491 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
7492 dev_info(&pf->pdev->dev,
7493 "Too many MDD events on VF %d, disabled\n", i);
7494 dev_info(&pf->pdev->dev,
7495 "Use PF Control I/F to re-enable the VF\n");
7496 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
7497 }
7498 }
7499
7500 /* re-enable mdd interrupt cause */
7501 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
7502 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
7503 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
7504 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
7505 i40e_flush(hw);
7506 }
7507
7508 static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
7509 {
7510 switch (port->type) {
7511 case UDP_TUNNEL_TYPE_VXLAN:
7512 return "vxlan";
7513 case UDP_TUNNEL_TYPE_GENEVE:
7514 return "geneve";
7515 default:
7516 return "unknown";
7517 }
7518 }
7519
7520 /**
7521 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
7522 * @pf: board private structure
7523 **/
7524 static void i40e_sync_udp_filters(struct i40e_pf *pf)
7525 {
7526 int i;
7527
7528 /* loop through and set pending bit for all active UDP filters */
7529 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7530 if (pf->udp_ports[i].port)
7531 pf->pending_udp_bitmap |= BIT_ULL(i);
7532 }
7533
7534 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
7535 }
7536
7537 /**
7538 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
7539 * @pf: board private structure
7540 **/
7541 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7542 {
7543 struct i40e_hw *hw = &pf->hw;
7544 i40e_status ret;
7545 u16 port;
7546 int i;
7547
7548 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
7549 return;
7550
7551 pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
7552
7553 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7554 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
7555 pf->pending_udp_bitmap &= ~BIT_ULL(i);
7556 port = pf->udp_ports[i].port;
7557 if (port)
7558 ret = i40e_aq_add_udp_tunnel(hw, port,
7559 pf->udp_ports[i].type,
7560 NULL, NULL);
7561 else
7562 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
7563
7564 if (ret) {
7565 dev_info(&pf->pdev->dev,
7566 "%s %s port %d, index %d failed, err %s aq_err %s\n",
7567 i40e_tunnel_name(&pf->udp_ports[i]),
7568 port ? "add" : "delete",
7569 port, i,
7570 i40e_stat_str(&pf->hw, ret),
7571 i40e_aq_str(&pf->hw,
7572 pf->hw.aq.asq_last_status));
7573 pf->udp_ports[i].port = 0;
7574 }
7575 }
7576 }
7577 }
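
/* Usage sketch (illustrative): ports typically land in udp_ports[]
 * via the udp_tunnel ndo callbacks elsewhere in this file, which
 * record the port and then mark it for the subtask above, roughly:
 *
 *	pf->udp_ports[next_idx].port = port;
 *	pf->pending_udp_bitmap |= BIT_ULL(next_idx);
 *	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
 *
 * The service task then pushes the change to firmware with the
 * add/del AQ commands above. next_idx here is hypothetical.
 */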
7578
7579 /**
7580 * i40e_service_task - Run the driver's async subtasks
7581 * @work: pointer to work_struct containing our data
7582 **/
7583 static void i40e_service_task(struct work_struct *work)
7584 {
7585 struct i40e_pf *pf = container_of(work,
7586 struct i40e_pf,
7587 service_task);
7588 unsigned long start_time = jiffies;
7589
7590 /* don't bother with service tasks if a reset is in progress */
7591 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
7592 return;
7593
7594 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
7595 return;
7596
7597 i40e_detect_recover_hung(pf);
7598 i40e_sync_filters_subtask(pf);
7599 i40e_reset_subtask(pf);
7600 i40e_handle_mdd_event(pf);
7601 i40e_vc_process_vflr_event(pf);
7602 i40e_watchdog_subtask(pf);
7603 i40e_fdir_reinit_subtask(pf);
7604 if (pf->flags & I40E_FLAG_CLIENT_RESET) {
7605 /* Client subtask will reopen next time through. */
7606 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
7607 pf->flags &= ~I40E_FLAG_CLIENT_RESET;
7608 } else {
7609 i40e_client_subtask(pf);
7610 if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
7611 i40e_notify_client_of_l2_param_changes(
7612 pf->vsi[pf->lan_vsi]);
7613 pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
7614 }
7615 }
7616 i40e_sync_filters_subtask(pf);
7617 i40e_sync_udp_filters_subtask(pf);
7618 i40e_clean_adminq_subtask(pf);
7619
7620 /* flush memory to make sure state is correct before next watchdog */
7621 smp_mb__before_atomic();
7622 clear_bit(__I40E_SERVICE_SCHED, pf->state);
7623
7624 /* If the tasks have taken longer than one timer cycle or there
7625 * is more work to be done, reschedule the service task now
7626 * rather than wait for the timer to tick again.
7627 */
7628 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7629 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
7630 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
7631 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
7632 i40e_service_event_schedule(pf);
7633 }
7634
7635 /**
7636 * i40e_service_timer - timer callback
7637 * @data: pointer to PF struct
7638 **/
7639 static void i40e_service_timer(unsigned long data)
7640 {
7641 struct i40e_pf *pf = (struct i40e_pf *)data;
7642
7643 mod_timer(&pf->service_timer,
7644 round_jiffies(jiffies + pf->service_timer_period));
7645 i40e_service_event_schedule(pf);
7646 }
7647
7648 /**
7649 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
7650 * @vsi: the VSI being configured
7651 **/
7652 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7653 {
7654 struct i40e_pf *pf = vsi->back;
7655
7656 switch (vsi->type) {
7657 case I40E_VSI_MAIN:
7658 vsi->alloc_queue_pairs = pf->num_lan_qps;
7659 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7660 I40E_REQ_DESCRIPTOR_MULTIPLE);
7661 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7662 vsi->num_q_vectors = pf->num_lan_msix;
7663 else
7664 vsi->num_q_vectors = 1;
7665
7666 break;
7667
7668 case I40E_VSI_FDIR:
7669 vsi->alloc_queue_pairs = 1;
7670 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7671 I40E_REQ_DESCRIPTOR_MULTIPLE);
7672 vsi->num_q_vectors = pf->num_fdsb_msix;
7673 break;
7674
7675 case I40E_VSI_VMDQ2:
7676 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7677 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7678 I40E_REQ_DESCRIPTOR_MULTIPLE);
7679 vsi->num_q_vectors = pf->num_vmdq_msix;
7680 break;
7681
7682 case I40E_VSI_SRIOV:
7683 vsi->alloc_queue_pairs = pf->num_vf_qps;
7684 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7685 I40E_REQ_DESCRIPTOR_MULTIPLE);
7686 break;
7687
7688 default:
7689 WARN_ON(1);
7690 return -ENODATA;
7691 }
7692
7693 return 0;
7694 }
7695
7696 /**
7697 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
7698 * @vsi: pointer to the VSI being configured
7699 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
7700 *
7701 * On error: returns error code (negative)
7702 * On success: returns 0
7703 **/
7704 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7705 {
7706 struct i40e_ring **next_rings;
7707 int size;
7708 int ret = 0;
7709
7710 /* allocate memory for the Tx, XDP Tx and Rx ring pointers */
7711 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
7712 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
7713 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7714 if (!vsi->tx_rings)
7715 return -ENOMEM;
7716 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
7717 if (i40e_enabled_xdp_vsi(vsi)) {
7718 vsi->xdp_rings = next_rings;
7719 next_rings += vsi->alloc_queue_pairs;
7720 }
7721 vsi->rx_rings = next_rings;
7722
7723 if (alloc_qvectors) {
7724 /* allocate memory for q_vector pointers */
7725 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7726 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7727 if (!vsi->q_vectors) {
7728 ret = -ENOMEM;
7729 goto err_vectors;
7730 }
7731 }
7732 return ret;
7733
7734 err_vectors:
7735 kfree(vsi->tx_rings);
7736 return ret;
7737 }
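
/* Layout sketch: one allocation backs all three pointer arrays, each
 * alloc_queue_pairs entries long (the XDP section exists only when
 * XDP is enabled):
 *
 *	tx_rings  -> entries [0, alloc_queue_pairs)
 *	xdp_rings -> entries [alloc_queue_pairs, 2 * alloc_queue_pairs)
 *	rx_rings  -> the final alloc_queue_pairs entries
 *
 * which is why the single kfree(vsi->tx_rings) in i40e_vsi_free_arrays()
 * releases all of them.
 */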
7738
7739 /**
7740 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
7741 * @pf: board private structure
7742 * @type: type of VSI
7743 *
7744 * On error: returns error code (negative)
7745 * On success: returns vsi index in PF (positive)
7746 **/
7747 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
7748 {
7749 int ret = -ENODEV;
7750 struct i40e_vsi *vsi;
7751 int vsi_idx;
7752 int i;
7753
7754 /* Need to protect the allocation of the VSIs at the PF level */
7755 mutex_lock(&pf->switch_mutex);
7756
7757 /* VSI list may be fragmented if VSI creation/destruction has
7758 * been happening. We can afford to do a quick scan to look
7759 * for any free VSIs in the list.
7760 *
7761 * find next empty vsi slot, looping back around if necessary
7762 */
7763 i = pf->next_vsi;
7764 while (i < pf->num_alloc_vsi && pf->vsi[i])
7765 i++;
7766 if (i >= pf->num_alloc_vsi) {
7767 i = 0;
7768 while (i < pf->next_vsi && pf->vsi[i])
7769 i++;
7770 }
7771
7772 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
7773 vsi_idx = i; /* Found one! */
7774 } else {
7775 ret = -ENODEV;
7776 goto unlock_pf; /* out of VSI slots! */
7777 }
7778 pf->next_vsi = ++i;
7779
7780 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
7781 if (!vsi) {
7782 ret = -ENOMEM;
7783 goto unlock_pf;
7784 }
7785 vsi->type = type;
7786 vsi->back = pf;
7787 set_bit(__I40E_VSI_DOWN, vsi->state);
7788 vsi->flags = 0;
7789 vsi->idx = vsi_idx;
7790 vsi->int_rate_limit = 0;
7791 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
7792 pf->rss_table_size : 64;
7793 vsi->netdev_registered = false;
7794 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
7795 hash_init(vsi->mac_filter_hash);
7796 vsi->irqs_ready = false;
7797
7798 ret = i40e_set_num_rings_in_vsi(vsi);
7799 if (ret)
7800 goto err_rings;
7801
7802 ret = i40e_vsi_alloc_arrays(vsi, true);
7803 if (ret)
7804 goto err_rings;
7805
7806 /* Setup default MSIX irq handler for VSI */
7807 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
7808
7809 /* Initialize VSI lock */
7810 spin_lock_init(&vsi->mac_filter_hash_lock);
7811 pf->vsi[vsi_idx] = vsi;
7812 ret = vsi_idx;
7813 goto unlock_pf;
7814
7815 err_rings:
7816 pf->next_vsi = i - 1;
7817 kfree(vsi);
7818 unlock_pf:
7819 mutex_unlock(&pf->switch_mutex);
7820 return ret;
7821 }
7822
7823 /**
7824 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
7825 * @vsi: pointer to the VSI being cleaned up
7826 * @free_qvectors: a bool to specify if q_vectors need to be freed.
7827 *
7828 * Frees the ring-pointer container (which also backs rx_rings and
7829 * xdp_rings) and, optionally, the q_vector pointer array.
7830 **/
7831 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7832 {
7833 /* free the ring and vector containers */
7834 if (free_qvectors) {
7835 kfree(vsi->q_vectors);
7836 vsi->q_vectors = NULL;
7837 }
7838 kfree(vsi->tx_rings);
7839 vsi->tx_rings = NULL;
7840 vsi->rx_rings = NULL;
7841 vsi->xdp_rings = NULL;
7842 }
7843
7844 /**
7845 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
7846 * and lookup table
7847 * @vsi: Pointer to VSI structure
7848 */
7849 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
7850 {
7851 if (!vsi)
7852 return;
7853
7854 kfree(vsi->rss_hkey_user);
7855 vsi->rss_hkey_user = NULL;
7856
7857 kfree(vsi->rss_lut_user);
7858 vsi->rss_lut_user = NULL;
7859 }
7860
7861 /**
7862 * i40e_vsi_clear - Deallocate the VSI provided
7863 * @vsi: the VSI being un-configured
7864 **/
7865 static int i40e_vsi_clear(struct i40e_vsi *vsi)
7866 {
7867 struct i40e_pf *pf;
7868
7869 if (!vsi)
7870 return 0;
7871
7872 if (!vsi->back)
7873 goto free_vsi;
7874 pf = vsi->back;
7875
7876 mutex_lock(&pf->switch_mutex);
7877 if (!pf->vsi[vsi->idx]) {
7878 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7879 vsi->idx, vsi->idx, vsi, vsi->type);
7880 goto unlock_vsi;
7881 }
7882
7883 if (pf->vsi[vsi->idx] != vsi) {
7884 dev_err(&pf->pdev->dev,
7885 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7886 pf->vsi[vsi->idx]->idx,
7887 pf->vsi[vsi->idx],
7888 pf->vsi[vsi->idx]->type,
7889 vsi->idx, vsi, vsi->type);
7890 goto unlock_vsi;
7891 }
7892
7893 /* updates the PF for this cleared vsi */
7894 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7895 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7896
7897 i40e_vsi_free_arrays(vsi, true);
7898 i40e_clear_rss_config_user(vsi);
7899
7900 pf->vsi[vsi->idx] = NULL;
7901 if (vsi->idx < pf->next_vsi)
7902 pf->next_vsi = vsi->idx;
7903
7904 unlock_vsi:
7905 mutex_unlock(&pf->switch_mutex);
7906 free_vsi:
7907 kfree(vsi);
7908
7909 return 0;
7910 }
7911
7912 /**
7913 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7914 * @vsi: the VSI being cleaned
7915 **/
7916 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7917 {
7918 int i;
7919
7920 if (vsi->tx_rings && vsi->tx_rings[0]) {
7921 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7922 kfree_rcu(vsi->tx_rings[i], rcu);
7923 vsi->tx_rings[i] = NULL;
7924 vsi->rx_rings[i] = NULL;
7925 if (vsi->xdp_rings)
7926 vsi->xdp_rings[i] = NULL;
7927 }
7928 }
7929 }
7930
7931 /**
7932 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7933 * @vsi: the VSI being configured
7934 **/
7935 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7936 {
7937 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
7938 struct i40e_pf *pf = vsi->back;
7939 struct i40e_ring *ring;
7940
7941 /* Set basic values in the rings to be used later during open() */
7942 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7943 /* allocate space for both Tx and Rx in one shot */
7944 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
7945 if (!ring)
7946 goto err_out;
7947
7948 ring->queue_index = i;
7949 ring->reg_idx = vsi->base_queue + i;
7950 ring->ring_active = false;
7951 ring->vsi = vsi;
7952 ring->netdev = vsi->netdev;
7953 ring->dev = &pf->pdev->dev;
7954 ring->count = vsi->num_desc;
7955 ring->size = 0;
7956 ring->dcb_tc = 0;
7957 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
7958 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7959 ring->tx_itr_setting = pf->tx_itr_default;
7960 vsi->tx_rings[i] = ring++;
7961
7962 if (!i40e_enabled_xdp_vsi(vsi))
7963 goto setup_rx;
7964
7965 ring->queue_index = vsi->alloc_queue_pairs + i;
7966 ring->reg_idx = vsi->base_queue + ring->queue_index;
7967 ring->ring_active = false;
7968 ring->vsi = vsi;
7969 ring->netdev = NULL;
7970 ring->dev = &pf->pdev->dev;
7971 ring->count = vsi->num_desc;
7972 ring->size = 0;
7973 ring->dcb_tc = 0;
7974 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
7975 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7976 set_ring_xdp(ring);
7977 ring->tx_itr_setting = pf->tx_itr_default;
7978 vsi->xdp_rings[i] = ring++;
7979
7980 setup_rx:
7981 ring->queue_index = i;
7982 ring->reg_idx = vsi->base_queue + i;
7983 ring->ring_active = false;
7984 ring->vsi = vsi;
7985 ring->netdev = vsi->netdev;
7986 ring->dev = &pf->pdev->dev;
7987 ring->count = vsi->num_desc;
7988 ring->size = 0;
7989 ring->dcb_tc = 0;
7990 ring->rx_itr_setting = pf->rx_itr_default;
7991 vsi->rx_rings[i] = ring;
7992 }
7993
7994 return 0;
7995
7996 err_out:
7997 i40e_vsi_clear_rings(vsi);
7998 return -ENOMEM;
7999 }
8000
8001 /**
8002 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
8003 * @pf: board private structure
8004 * @vectors: the number of MSI-X vectors to request
8005 *
8006 * Returns the number of vectors reserved, or error
8007 **/
8008 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
8009 {
8010 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
8011 I40E_MIN_MSIX, vectors);
8012 if (vectors < 0) {
8013 dev_info(&pf->pdev->dev,
8014 "MSI-X vector reservation failed: %d\n", vectors);
8015 vectors = 0;
8016 }
8017
8018 return vectors;
8019 }
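
/* Note: pci_enable_msix_range() returns the number of vectors
 * actually allocated, anywhere between the I40E_MIN_MSIX floor and
 * the requested count, so callers must cope with getting fewer than
 * asked for, e.g. a request for 64 may return 32 on a constrained
 * system.
 */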
8020
8021 /**
8022 * i40e_init_msix - Setup the MSIX capability
8023 * @pf: board private structure
8024 *
8025 * Work with the OS to set up the MSIX vectors needed.
8026 *
8027 * Returns the number of vectors reserved or negative on failure
8028 **/
8029 static int i40e_init_msix(struct i40e_pf *pf)
8030 {
8031 struct i40e_hw *hw = &pf->hw;
8032 int cpus, extra_vectors;
8033 int vectors_left;
8034 int v_budget, i;
8035 int v_actual;
8036 int iwarp_requested = 0;
8037
8038 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
8039 return -ENODEV;
8040
8041 /* The number of vectors we'll request will be composed of:
8042 *   - 1 for the "other" cause, e.g. Admin Queue events
8043 *   - the number of LAN queue pairs:
8044 *     - queues being used for RSS;
8045 *       we don't need as many as max_rss_size vectors,
8046 *       so we use rss_size instead in the calculation since
8047 *       that is governed by the number of CPUs in the system
8048 *     - assumes symmetric Tx/Rx pairing
8049 *   - the number of VMDq pairs
8050 *   - the CPU count within the NUMA node if iWARP is enabled
8051 * Once we count this up, try the request.
8052 *
8053 * If we can't get what we want, we'll simplify to nearly nothing
8054 * and try again. If that still fails, we punt.
8055 */
8056 vectors_left = hw->func_caps.num_msix_vectors;
8057 v_budget = 0;
8058
8059 /* reserve one vector for miscellaneous handler */
8060 if (vectors_left) {
8061 v_budget++;
8062 vectors_left--;
8063 }
8064
8065 /* reserve some vectors for the main PF traffic queues. Initially we
8066 * only reserve at most 50% of the available vectors, in the case that
8067 * the number of online CPUs is large. This ensures that we can enable
8068 * extra features as well. Once we've enabled the other features, we
8069 * will use any remaining vectors to reach as close as we can to the
8070 * number of online CPUs.
8071 */
8072 cpus = num_online_cpus();
8073 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
8074 vectors_left -= pf->num_lan_msix;
8075
8076 /* reserve one vector for sideband flow director */
8077 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8078 if (vectors_left) {
8079 pf->num_fdsb_msix = 1;
8080 v_budget++;
8081 vectors_left--;
8082 } else {
8083 pf->num_fdsb_msix = 0;
8084 }
8085 }
8086
8087 /* can we reserve enough for iWARP? */
8088 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
8089 iwarp_requested = pf->num_iwarp_msix;
8090
8091 if (!vectors_left)
8092 pf->num_iwarp_msix = 0;
8093 else if (vectors_left < pf->num_iwarp_msix)
8094 pf->num_iwarp_msix = 1;
8095 v_budget += pf->num_iwarp_msix;
8096 vectors_left -= pf->num_iwarp_msix;
8097 }
8098
8099 /* any vectors left over go for VMDq support */
8100 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
8101 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
8102 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
8103
8104 if (!vectors_left) {
8105 pf->num_vmdq_msix = 0;
8106 pf->num_vmdq_qps = 0;
8107 } else {
8108 /* if we're short on vectors for what's desired, we limit
8109 * the queues per vmdq. If this is still more than are
8110 * available, the user will need to change the number of
8111 * queues/vectors used by the PF later with the ethtool
8112 * channels command
8113 */
8114 if (vmdq_vecs < vmdq_vecs_wanted)
8115 pf->num_vmdq_qps = 1;
8116 pf->num_vmdq_msix = pf->num_vmdq_qps;
8117
8118 v_budget += vmdq_vecs;
8119 vectors_left -= vmdq_vecs;
8120 }
8121 }
8122
8123 /* On systems with a large number of SMP cores, we previously limited
8124 * the number of vectors for num_lan_msix to be at most 50% of the
8125 * available vectors, to allow for other features. Now, we add back
8126 * the remaining vectors. However, we ensure that the total
8127 * num_lan_msix will not exceed num_online_cpus(). To do this, we
8128 * calculate the number of vectors we can add without going over the
8129 * cap of CPUs. For systems with a small number of CPUs this will be
8130 * zero.
8131 */
8132 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
8133 pf->num_lan_msix += extra_vectors;
8134 vectors_left -= extra_vectors;
8135
8136 WARN(vectors_left < 0,
8137 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
8138
8139 v_budget += pf->num_lan_msix;
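
/* Worked example (illustrative, assuming 16 online CPUs, 64 MSI-X
 * vectors from func_caps, flow director sideband on, iWARP and VMDq
 * off):
 *   misc:   1 vector                       (vectors_left 64 -> 63)
 *   LAN:    min(16, 63 / 2) = 16 vectors   (63 -> 47)
 *   FD SB:  1 vector                       (47 -> 46)
 *   extra:  min(16 - 16, 46) = 0 vectors
 * for a total v_budget of 18; the rest stay unused.
 */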
8140 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
8141 GFP_KERNEL);
8142 if (!pf->msix_entries)
8143 return -ENOMEM;
8144
8145 for (i = 0; i < v_budget; i++)
8146 pf->msix_entries[i].entry = i;
8147 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
8148
8149 if (v_actual < I40E_MIN_MSIX) {
8150 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
8151 kfree(pf->msix_entries);
8152 pf->msix_entries = NULL;
8153 pci_disable_msix(pf->pdev);
8154 return -ENODEV;
8155
8156 } else if (v_actual == I40E_MIN_MSIX) {
8157 /* Adjust for minimal MSIX use */
8158 pf->num_vmdq_vsis = 0;
8159 pf->num_vmdq_qps = 0;
8160 pf->num_lan_qps = 1;
8161 pf->num_lan_msix = 1;
8162
8163 } else if (!vectors_left) {
8164 /* If we have limited resources, we will start with no vectors
8165 * for the special features and then allocate vectors to some
8166 * of these features based on the policy and at the end disable
8167 * the features that did not get any vectors.
8168 */
8169 int vec;
8170
8171 dev_info(&pf->pdev->dev,
8172 "MSI-X vector limit reached, attempting to redistribute vectors\n");
8173 /* reserve the misc vector */
8174 vec = v_actual - 1;
8175
8176 /* Scale vector usage down */
8177 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
8178 pf->num_vmdq_vsis = 1;
8179 pf->num_vmdq_qps = 1;
8180
8181 /* partition out the remaining vectors */
8182 switch (vec) {
8183 case 2:
8184 pf->num_lan_msix = 1;
8185 break;
8186 case 3:
8187 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
8188 pf->num_lan_msix = 1;
8189 pf->num_iwarp_msix = 1;
8190 } else {
8191 pf->num_lan_msix = 2;
8192 }
8193 break;
8194 default:
8195 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
8196 pf->num_iwarp_msix = min_t(int, (vec / 3),
8197 iwarp_requested);
8198 pf->num_vmdq_vsis = min_t(int, (vec / 3),
8199 I40E_DEFAULT_NUM_VMDQ_VSI);
8200 } else {
8201 pf->num_vmdq_vsis = min_t(int, (vec / 2),
8202 I40E_DEFAULT_NUM_VMDQ_VSI);
8203 }
8204 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8205 pf->num_fdsb_msix = 1;
8206 vec--;
8207 }
8208 pf->num_lan_msix = min_t(int,
8209 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
8210 pf->num_lan_msix);
8211 pf->num_lan_qps = pf->num_lan_msix;
8212 break;
8213 }
8214 }
8215
8216 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8217 (pf->num_fdsb_msix == 0)) {
8218 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
8219 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8220 }
8221 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
8222 (pf->num_vmdq_msix == 0)) {
8223 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
8224 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
8225 }
8226
8227 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
8228 (pf->num_iwarp_msix == 0)) {
8229 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
8230 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
8231 }
8232 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
8233 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
8234 pf->num_lan_msix,
8235 pf->num_vmdq_msix * pf->num_vmdq_vsis,
8236 pf->num_fdsb_msix,
8237 pf->num_iwarp_msix);
8238
8239 return v_actual;
8240 }
8241
8242 /**
8243 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
8244 * @vsi: the VSI being configured
8245 * @v_idx: index of the vector in the vsi struct
8246 * @cpu: cpu to be used on affinity_mask
8247 *
8248 * We allocate one q_vector. If allocation fails we return -ENOMEM.
8249 **/
8250 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
8251 {
8252 struct i40e_q_vector *q_vector;
8253
8254 /* allocate q_vector */
8255 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
8256 if (!q_vector)
8257 return -ENOMEM;
8258
8259 q_vector->vsi = vsi;
8260 q_vector->v_idx = v_idx;
8261 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
8262
8263 if (vsi->netdev)
8264 netif_napi_add(vsi->netdev, &q_vector->napi,
8265 i40e_napi_poll, NAPI_POLL_WEIGHT);
8266
8267 q_vector->rx.latency_range = I40E_LOW_LATENCY;
8268 q_vector->tx.latency_range = I40E_LOW_LATENCY;
8269
8270 /* tie q_vector and vsi together */
8271 vsi->q_vectors[v_idx] = q_vector;
8272
8273 return 0;
8274 }
8275
8276 /**
8277 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
8278 * @vsi: the VSI being configured
8279 *
8280 * We allocate one q_vector per queue interrupt. If allocation fails we
8281 * return -ENOMEM.
8282 **/
8283 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
8284 {
8285 struct i40e_pf *pf = vsi->back;
8286 int err, v_idx, num_q_vectors, current_cpu;
8287
8288 /* if not MSIX, give the one vector only to the LAN VSI */
8289 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
8290 num_q_vectors = vsi->num_q_vectors;
8291 else if (vsi == pf->vsi[pf->lan_vsi])
8292 num_q_vectors = 1;
8293 else
8294 return -EINVAL;
8295
8296 current_cpu = cpumask_first(cpu_online_mask);
8297
8298 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
8299 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
8300 if (err)
8301 goto err_out;
8302 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
8303 if (unlikely(current_cpu >= nr_cpu_ids))
8304 current_cpu = cpumask_first(cpu_online_mask);
8305 }
8306
8307 return 0;
8308
8309 err_out:
8310 while (v_idx--)
8311 i40e_free_q_vector(vsi, v_idx);
8312
8313 return err;
8314 }
8315
8316 /**
8317 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
8318 * @pf: board private structure to initialize
8319 **/
8320 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
8321 {
8322 int vectors = 0;
8323 ssize_t size;
8324
8325 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
8326 vectors = i40e_init_msix(pf);
8327 if (vectors < 0) {
8328 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
8329 I40E_FLAG_IWARP_ENABLED |
8330 I40E_FLAG_RSS_ENABLED |
8331 I40E_FLAG_DCB_CAPABLE |
8332 I40E_FLAG_DCB_ENABLED |
8333 I40E_FLAG_SRIOV_ENABLED |
8334 I40E_FLAG_FD_SB_ENABLED |
8335 I40E_FLAG_FD_ATR_ENABLED |
8336 I40E_FLAG_VMDQ_ENABLED);
8337
8338 /* rework the queue expectations without MSIX */
8339 i40e_determine_queue_usage(pf);
8340 }
8341 }
8342
8343 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
8344 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
8345 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
8346 vectors = pci_enable_msi(pf->pdev);
8347 if (vectors < 0) {
8348 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
8349 vectors);
8350 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
8351 }
8352 vectors = 1; /* one MSI or Legacy vector */
8353 }
8354
8355 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
8356 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
8357
8358 /* set up vector assignment tracking */
8359 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
8360 pf->irq_pile = kzalloc(size, GFP_KERNEL);
8361 if (!pf->irq_pile) {
8362 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
8363 return -ENOMEM;
8364 }
8365 pf->irq_pile->num_entries = vectors;
8366 pf->irq_pile->search_hint = 0;
8367
8368 /* track first vector for misc interrupts, ignore return */
8369 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
8370
8371 return 0;
8372 }
8373
8374 /**
8375 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
8376 * @pf: board private structure
8377 *
8378 * This sets up the handler for MSIX 0, which is used to manage the
8379 * non-queue interrupts, e.g. AdminQ and errors. This is not used
8380 * when in MSI or Legacy interrupt mode.
8381 **/
8382 static int i40e_setup_misc_vector(struct i40e_pf *pf)
8383 {
8384 struct i40e_hw *hw = &pf->hw;
8385 int err = 0;
8386
8387 /* Only request the irq if this is the first time through, and
8388 * not when we're rebuilding after a Reset
8389 */
8390 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
8391 err = request_irq(pf->msix_entries[0].vector,
8392 i40e_intr, 0, pf->int_name, pf);
8393 if (err) {
8394 dev_info(&pf->pdev->dev,
8395 "request_irq for %s failed: %d\n",
8396 pf->int_name, err);
8397 return -EFAULT;
8398 }
8399 }
8400
8401 i40e_enable_misc_int_causes(pf);
8402
8403 /* associate no queues to the misc vector */
8404 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
8405 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
8406
8407 i40e_flush(hw);
8408
8409 i40e_irq_dynamic_enable_icr0(pf, true);
8410
8411 return err;
8412 }
8413
8414 /**
8415 * i40e_config_rss_aq - Prepare for RSS using AQ commands
8416 * @vsi: vsi structure
8417 * @seed: RSS hash seed
8418 **/
8419 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8420 u8 *lut, u16 lut_size)
8421 {
8422 struct i40e_pf *pf = vsi->back;
8423 struct i40e_hw *hw = &pf->hw;
8424 int ret = 0;
8425
8426 if (seed) {
8427 struct i40e_aqc_get_set_rss_key_data *seed_dw =
8428 (struct i40e_aqc_get_set_rss_key_data *)seed;
8429 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
8430 if (ret) {
8431 dev_info(&pf->pdev->dev,
8432 "Cannot set RSS key, err %s aq_err %s\n",
8433 i40e_stat_str(hw, ret),
8434 i40e_aq_str(hw, hw->aq.asq_last_status));
8435 return ret;
8436 }
8437 }
8438 if (lut) {
8439 bool pf_lut = (vsi->type == I40E_VSI_MAIN);
8440
8441 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8442 if (ret) {
8443 dev_info(&pf->pdev->dev,
8444 "Cannot set RSS lut, err %s aq_err %s\n",
8445 i40e_stat_str(hw, ret),
8446 i40e_aq_str(hw, hw->aq.asq_last_status));
8447 return ret;
8448 }
8449 }
8450 return ret;
8451 }
8452
8453 /**
8454 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
8455 * @vsi: Pointer to vsi structure
8456 * @seed: Buffer to store the hash keys
8457 * @lut: Buffer to store the lookup table entries
8458 * @lut_size: Size of buffer to store the lookup table entries
8459 *
8460 * Return 0 on success, negative on failure
8461 */
8462 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8463 u8 *lut, u16 lut_size)
8464 {
8465 struct i40e_pf *pf = vsi->back;
8466 struct i40e_hw *hw = &pf->hw;
8467 int ret = 0;
8468
8469 if (seed) {
8470 ret = i40e_aq_get_rss_key(hw, vsi->id,
8471 (struct i40e_aqc_get_set_rss_key_data *)seed);
8472 if (ret) {
8473 dev_info(&pf->pdev->dev,
8474 "Cannot get RSS key, err %s aq_err %s\n",
8475 i40e_stat_str(&pf->hw, ret),
8476 i40e_aq_str(&pf->hw,
8477 pf->hw.aq.asq_last_status));
8478 return ret;
8479 }
8480 }
8481
8482 if (lut) {
8483 bool pf_lut = (vsi->type == I40E_VSI_MAIN);
8484
8485 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8486 if (ret) {
8487 dev_info(&pf->pdev->dev,
8488 "Cannot get RSS lut, err %s aq_err %s\n",
8489 i40e_stat_str(&pf->hw, ret),
8490 i40e_aq_str(&pf->hw,
8491 pf->hw.aq.asq_last_status));
8492 return ret;
8493 }
8494 }
8495
8496 return ret;
8497 }
8498
8499 /**
8500 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
8501 * @vsi: VSI structure
8502 **/
8503 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
8504 {
8505 u8 seed[I40E_HKEY_ARRAY_SIZE];
8506 struct i40e_pf *pf = vsi->back;
8507 u8 *lut;
8508 int ret;
8509
8510 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
8511 return 0;
8512
8513 if (!vsi->rss_size)
8514 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8515 vsi->num_queue_pairs);
8516 if (!vsi->rss_size)
8517 return -EINVAL;
8518
8519 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8520 if (!lut)
8521 return -ENOMEM;
8522 /* Use the user configured hash keys and lookup table if there is one,
8523 * otherwise use default
8524 */
8525 if (vsi->rss_lut_user)
8526 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8527 else
8528 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8529 if (vsi->rss_hkey_user)
8530 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8531 else
8532 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8533 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
8534 kfree(lut);
8535
8536 return ret;
8537 }
8538
8539 /**
8540 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
8541 * @vsi: Pointer to vsi structure
8542 * @seed: RSS hash seed
8543 * @lut: Lookup table
8544 * @lut_size: Lookup table size
8545 *
8546 * Returns 0 on success, negative on failure
8547 **/
8548 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
8549 const u8 *lut, u16 lut_size)
8550 {
8551 struct i40e_pf *pf = vsi->back;
8552 struct i40e_hw *hw = &pf->hw;
8553 u16 vf_id = vsi->vf_id;
8554 u8 i;
8555
8556 /* Fill out hash function seed */
8557 if (seed) {
8558 u32 *seed_dw = (u32 *)seed;
8559
8560 if (vsi->type == I40E_VSI_MAIN) {
8561 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8562 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
8563 } else if (vsi->type == I40E_VSI_SRIOV) {
8564 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
8565 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
8566 } else {
8567 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
8568 }
8569 }
8570
8571 if (lut) {
8572 u32 *lut_dw = (u32 *)lut;
8573
8574 if (vsi->type == I40E_VSI_MAIN) {
8575 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8576 return -EINVAL;
8577 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8578 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
8579 } else if (vsi->type == I40E_VSI_SRIOV) {
8580 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
8581 return -EINVAL;
8582 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8583 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
8584 } else {
8585 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8586 }
8587 }
8588 i40e_flush(hw);
8589
8590 return 0;
8591 }
8592
8593 /**
8594 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
8595 * @vsi: Pointer to VSI structure
8596 * @seed: Buffer to store the keys
8597 * @lut: Buffer to store the lookup table entries
8598 * @lut_size: Size of buffer to store the lookup table entries
8599 *
8600 * Returns 0 on success, negative on failure
8601 */
8602 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
8603 u8 *lut, u16 lut_size)
8604 {
8605 struct i40e_pf *pf = vsi->back;
8606 struct i40e_hw *hw = &pf->hw;
8607 u16 i;
8608
8609 if (seed) {
8610 u32 *seed_dw = (u32 *)seed;
8611
8612 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8613 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
8614 }
8615 if (lut) {
8616 u32 *lut_dw = (u32 *)lut;
8617
8618 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8619 return -EINVAL;
8620 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8621 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8622 }
8623
8624 return 0;
8625 }
8626
8627 /**
8628 * i40e_config_rss - Configure RSS keys and lut
8629 * @vsi: Pointer to VSI structure
8630 * @seed: RSS hash seed
8631 * @lut: Lookup table
8632 * @lut_size: Lookup table size
8633 *
8634 * Returns 0 on success, negative on failure
8635 */
8636 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8637 {
8638 struct i40e_pf *pf = vsi->back;
8639
8640 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
8641 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8642 else
8643 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8644 }
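
/* Usage sketch (hypothetical caller): both backends skip a NULL seed
 * or NULL lut, so the lookup table can be replaced without touching
 * the hash key:
 *
 *	u8 lut[I40E_HLUT_ARRAY_SIZE];
 *
 *	i40e_fill_rss_lut(pf, lut, I40E_HLUT_ARRAY_SIZE, vsi->rss_size);
 *	err = i40e_config_rss(vsi, NULL, lut, I40E_HLUT_ARRAY_SIZE);
 */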
8645
8646 /**
8647 * i40e_get_rss - Get RSS keys and lut
8648 * @vsi: Pointer to VSI structure
8649 * @seed: Buffer to store the keys
8650 * @lut: Buffer to store the lookup table entries
8651 * @lut_size: Size of buffer to store the lookup table entries
8652 *
8653 * Returns 0 on success, negative on failure
8654 */
8655 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8656 {
8657 struct i40e_pf *pf = vsi->back;
8658
8659 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
8660 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
8661 else
8662 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
8663 }
8664
8665 /**
8666 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
8667 * @pf: Pointer to board private structure
8668 * @lut: Lookup table
8669 * @rss_table_size: Lookup table size
8670 * @rss_size: Range of queue number for hashing
8671 */
8672 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8673 u16 rss_table_size, u16 rss_size)
8674 {
8675 u16 i;
8676
8677 for (i = 0; i < rss_table_size; i++)
8678 lut[i] = i % rss_size;
8679 }
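
/* Worked example: rss_table_size = 8 and rss_size = 3 yield the
 * round-robin table {0, 1, 2, 0, 1, 2, 0, 1}, spreading hash buckets
 * evenly across the first three queues.
 */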
8680
8681 /**
8682 * i40e_pf_config_rss - Prepare for RSS if used
8683 * @pf: board private structure
8684 **/
8685 static int i40e_pf_config_rss(struct i40e_pf *pf)
8686 {
8687 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8688 u8 seed[I40E_HKEY_ARRAY_SIZE];
8689 u8 *lut;
8690 struct i40e_hw *hw = &pf->hw;
8691 u32 reg_val;
8692 u64 hena;
8693 int ret;
8694
8695 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
8696 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
8697 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
8698 hena |= i40e_pf_get_default_rss_hena(pf);
8699
8700 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
8701 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
8702
8703 /* Determine the RSS table size based on the hardware capabilities */
8704 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
8705 reg_val = (pf->rss_table_size == 512) ?
8706 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
8707 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
8708 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
8709
8710 /* Determine the RSS size of the VSI */
8711 if (!vsi->rss_size) {
8712 u16 qcount;
8713
8714 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
8715 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
8716 }
8717 if (!vsi->rss_size)
8718 return -EINVAL;
8719
8720 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8721 if (!lut)
8722 return -ENOMEM;
8723
8724 /* Use user configured lut if there is one, otherwise use default */
8725 if (vsi->rss_lut_user)
8726 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8727 else
8728 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8729
8730 /* Use user configured hash key if there is one, otherwise
8731 * use default.
8732 */
8733 if (vsi->rss_hkey_user)
8734 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8735 else
8736 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8737 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
8738 kfree(lut);
8739
8740 return ret;
8741 }
8742
8743 /**
8744 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
8745 * @pf: board private structure
8746 * @queue_count: the requested queue count for rss.
8747 *
8748 * returns 0 if rss is not enabled, if enabled returns the final rss queue
8749 * count which may be different from the requested queue count.
8750 * Note: expects to be called while under rtnl_lock()
8751 **/
8752 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
8753 {
8754 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8755 int new_rss_size;
8756
8757 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
8758 return 0;
8759
8760 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
8761
8762 if (queue_count != vsi->num_queue_pairs) {
8763 u16 qcount;
8764
8765 vsi->req_queue_pairs = queue_count;
8766 i40e_prep_for_reset(pf, true);
8767
8768 pf->alloc_rss_size = new_rss_size;
8769
8770 i40e_reset_and_rebuild(pf, true, true);
8771
8772 /* Discard the user configured hash keys and lut, if less
8773 * queues are enabled.
8774 */
8775 if (queue_count < vsi->rss_size) {
8776 i40e_clear_rss_config_user(vsi);
8777 dev_dbg(&pf->pdev->dev,
8778 "discard user configured hash keys and lut\n");
8779 }
8780
8781 /* Reset vsi->rss_size, as number of enabled queues changed */
8782 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
8783 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
8784
8785 i40e_pf_config_rss(pf);
8786 }
8787 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
8788 vsi->req_queue_pairs, pf->rss_size_max);
8789 return pf->alloc_rss_size;
8790 }
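
/* Usage sketch (hypothetical caller, e.g. an ethtool channel-count
 * handler); per the note above, rtnl_lock() must already be held:
 *
 *	rtnl_lock();
 *	cur = i40e_reconfig_rss_queues(pf, requested_count);
 *	rtnl_unlock();
 *
 * The value returned is the RSS size actually in effect, which may
 * have been clamped to pf->rss_size_max.
 */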
8791
8792 /**
8793 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
8794 * @pf: board private structure
8795 **/
8796 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
8797 {
8798 i40e_status status;
8799 bool min_valid, max_valid;
8800 u32 max_bw, min_bw;
8801
8802 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8803 &min_valid, &max_valid);
8804
8805 if (!status) {
8806 if (min_valid)
8807 pf->min_bw = min_bw;
8808 if (max_valid)
8809 pf->max_bw = max_bw;
8810 }
8811
8812 return status;
8813 }
8814
8815 /**
8816 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
8817 * @pf: board private structure
8818 **/
8819 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
8820 {
8821 struct i40e_aqc_configure_partition_bw_data bw_data;
8822 i40e_status status;
8823
8824 /* Set the valid bit for this PF */
8825 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
8826 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
8827 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
8828
8829 /* Set the new bandwidths */
8830 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8831
8832 return status;
8833 }
8834
8835 /**
8836 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
8837 * @pf: board private structure
8838 **/
8839 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
8840 {
8841 /* Commit temporary BW setting to permanent NVM image */
8842 enum i40e_admin_queue_err last_aq_status;
8843 i40e_status ret;
8844 u16 nvm_word;
8845
8846 if (pf->hw.partition_id != 1) {
8847 dev_info(&pf->pdev->dev,
8848 "Commit BW only works on partition 1! This is partition %d",
8849 pf->hw.partition_id);
8850 ret = I40E_NOT_SUPPORTED;
8851 goto bw_commit_out;
8852 }
8853
8854 /* Acquire NVM for read access */
8855 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
8856 last_aq_status = pf->hw.aq.asq_last_status;
8857 if (ret) {
8858 dev_info(&pf->pdev->dev,
8859 "Cannot acquire NVM for read access, err %s aq_err %s\n",
8860 i40e_stat_str(&pf->hw, ret),
8861 i40e_aq_str(&pf->hw, last_aq_status));
8862 goto bw_commit_out;
8863 }
8864
8865 /* Read word 0x10 of NVM - SW compatibility word 1 */
8866 ret = i40e_aq_read_nvm(&pf->hw,
8867 I40E_SR_NVM_CONTROL_WORD,
8868 0x10, sizeof(nvm_word), &nvm_word,
8869 false, NULL);
8870 /* Save off last admin queue command status before releasing
8871 * the NVM
8872 */
8873 last_aq_status = pf->hw.aq.asq_last_status;
8874 i40e_release_nvm(&pf->hw);
8875 if (ret) {
8876 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
8877 i40e_stat_str(&pf->hw, ret),
8878 i40e_aq_str(&pf->hw, last_aq_status));
8879 goto bw_commit_out;
8880 }
8881
8882 /* Wait a bit for NVM release to complete */
8883 msleep(50);
8884
8885 /* Acquire NVM for write access */
8886 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
8887 last_aq_status = pf->hw.aq.asq_last_status;
8888 if (ret) {
8889 dev_info(&pf->pdev->dev,
8890 "Cannot acquire NVM for write access, err %s aq_err %s\n",
8891 i40e_stat_str(&pf->hw, ret),
8892 i40e_aq_str(&pf->hw, last_aq_status));
8893 goto bw_commit_out;
8894 }
8895 /* Write it back out unchanged to initiate update NVM,
8896 * which will force a write of the shadow (alt) RAM to
8897 * the NVM - thus storing the bandwidth values permanently.
8898 */
8899 ret = i40e_aq_update_nvm(&pf->hw,
8900 I40E_SR_NVM_CONTROL_WORD,
8901 0x10, sizeof(nvm_word),
8902 &nvm_word, true, NULL);
8903 /* Save off last admin queue command status before releasing
8904 * the NVM
8905 */
8906 last_aq_status = pf->hw.aq.asq_last_status;
8907 i40e_release_nvm(&pf->hw);
8908 if (ret)
8909 dev_info(&pf->pdev->dev,
8910 "BW settings NOT SAVED, err %s aq_err %s\n",
8911 i40e_stat_str(&pf->hw, ret),
8912 i40e_aq_str(&pf->hw, last_aq_status));
8913 bw_commit_out:
8914
8915 return ret;
8916 }
8917
8918 /**
8919 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
8920 * @pf: board private structure to initialize
8921 *
8922 * i40e_sw_init initializes the Adapter private data structure.
8923 * Fields are initialized based on PCI device information and
8924 * OS network device settings (MTU size).
8925 **/
8926 static int i40e_sw_init(struct i40e_pf *pf)
8927 {
8928 int err = 0;
8929 int size;
8930
8931 /* Set default capability flags */
8932 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
8933 I40E_FLAG_MSI_ENABLED |
8934 I40E_FLAG_MSIX_ENABLED;
8935
8936 /* Set default ITR */
8937 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
8938 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
8939
8940 /* Depending on PF configurations, it is possible that the RSS
8941 * maximum might end up larger than the available queues
8942 */
8943 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
8944 pf->alloc_rss_size = 1;
8945 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
8946 pf->rss_size_max = min_t(int, pf->rss_size_max,
8947 pf->hw.func_caps.num_tx_qp);
8948 if (pf->hw.func_caps.rss) {
8949 pf->flags |= I40E_FLAG_RSS_ENABLED;
8950 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
8951 num_online_cpus());
8952 }
8953
8954 /* MFP mode enabled */
8955 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
8956 pf->flags |= I40E_FLAG_MFP_ENABLED;
8957 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
8958 if (i40e_get_partition_bw_setting(pf)) {
8959 dev_warn(&pf->pdev->dev,
8960 "Could not get partition bw settings\n");
8961 } else {
8962 dev_info(&pf->pdev->dev,
8963 "Partition BW Min = %8.8x, Max = %8.8x\n",
8964 pf->min_bw, pf->max_bw);
8965
8966 /* nudge the Tx scheduler */
8967 i40e_set_partition_bw_setting(pf);
8968 }
8969 }
8970
8971 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
8972 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
8973 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8974 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
8975 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
8976 pf->hw.num_partitions > 1)
8977 dev_info(&pf->pdev->dev,
8978 "Flow Director Sideband mode Disabled in MFP mode\n");
8979 else
8980 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8981 pf->fdir_pf_filter_count =
8982 pf->hw.func_caps.fd_filters_guaranteed;
8983 pf->hw.fdir_shared_filter_count =
8984 pf->hw.func_caps.fd_filters_best_effort;
8985 }
8986
8987 if (pf->hw.mac.type == I40E_MAC_X722) {
8988 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
8989 I40E_HW_128_QP_RSS_CAPABLE |
8990 I40E_HW_ATR_EVICT_CAPABLE |
8991 I40E_HW_WB_ON_ITR_CAPABLE |
8992 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
8993 I40E_HW_NO_PCI_LINK_CHECK |
8994 I40E_HW_USE_SET_LLDP_MIB |
8995 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
8996 I40E_HW_PTP_L4_CAPABLE |
8997 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
8998 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
8999
9000 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
9001 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
9002 I40E_FDEVICT_PCTYPE_DEFAULT) {
9003 dev_warn(&pf->pdev->dev,
9004 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
9005 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
9006 }
9007 } else if ((pf->hw.aq.api_maj_ver > 1) ||
9008 ((pf->hw.aq.api_maj_ver == 1) &&
9009 (pf->hw.aq.api_min_ver > 4))) {
9010 /* Supported in FW API version higher than 1.4 */
9011 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
9012 }
9013
9014 /* Enable HW ATR eviction if possible */
9015 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
9016 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
9017
9018 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
9019 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
9020 (pf->hw.aq.fw_maj_ver < 4))) {
9021 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
9022 /* No DCB support for FW < v4.33 */
9023 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
9024 }
9025
9026 /* Disable FW LLDP if FW < v4.3 */
9027 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
9028 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
9029 (pf->hw.aq.fw_maj_ver < 4)))
9030 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
9031
9032 /* Use the FW Set LLDP MIB API if FW > v4.40 */
9033 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
9034 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
9035 (pf->hw.aq.fw_maj_ver >= 5)))
9036 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
9037
9038 if (pf->hw.func_caps.vmdq) {
9039 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
9040 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
9041 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
9042 }
9043
9044 if (pf->hw.func_caps.iwarp) {
9045 pf->flags |= I40E_FLAG_IWARP_ENABLED;
9046 /* IWARP needs one extra vector for CQP, just like MISC. */
9047 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
9048 }
9049
9050 #ifdef CONFIG_PCI_IOV
9051 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
9052 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
9053 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
9054 pf->num_req_vfs = min_t(int,
9055 pf->hw.func_caps.num_vfs,
9056 I40E_MAX_VF_COUNT);
9057 }
9058 #endif /* CONFIG_PCI_IOV */
9059 pf->eeprom_version = 0xDEAD;
9060 pf->lan_veb = I40E_NO_VEB;
9061 pf->lan_vsi = I40E_NO_VSI;
9062
9063 /* By default FW has this off for performance reasons */
9064 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
9065
9066 /* set up queue assignment tracking */
9067 size = sizeof(struct i40e_lump_tracking)
9068 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
9069 pf->qp_pile = kzalloc(size, GFP_KERNEL);
9070 if (!pf->qp_pile) {
9071 err = -ENOMEM;
9072 goto sw_init_done;
9073 }
9074 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
9075 pf->qp_pile->search_hint = 0;
9076
9077 pf->tx_timeout_recovery_level = 1;
9078
9079 mutex_init(&pf->switch_mutex);
9080
9081 sw_init_done:
9082 return err;
9083 }
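/* Sizing note for the qp_pile allocation above (an assumption about the
 * struct layout): i40e_lump_tracking is a small header followed by a
 * flexible u16 array, so with func_caps.num_tx_qp == 1536 the kzalloc
 * covers the header plus 1536 two-byte slots, one ownership mark per
 * queue pair, handed out later by i40e_get_lump().
 */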
9084
9085 /**
9086 * i40e_set_ntuple - set the ntuple feature flag and take action
9087 * @pf: board private structure to initialize
9088 * @features: the feature set that the stack is suggesting
9089 *
9090 * Returns a bool to indicate if a reset needs to happen
9091 **/
9092 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
9093 {
9094 bool need_reset = false;
9095
9096 /* Check if Flow Director n-tuple support was enabled or disabled. If
9097 * the state changed, we need to reset.
9098 */
9099 if (features & NETIF_F_NTUPLE) {
9100 /* Enable filters and mark for reset */
9101 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9102 need_reset = true;
9103 /* enable FD_SB only if there is an MSI-X vector */
9104 if (pf->num_fdsb_msix > 0)
9105 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
9106 } else {
9107 /* turn off filters, mark for reset and clear SW filter list */
9108 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9109 need_reset = true;
9110 i40e_fdir_filter_exit(pf);
9111 }
9112 pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
9113 I40E_FLAG_FD_SB_AUTO_DISABLED);
9114 /* reset fd counters */
9115 pf->fd_add_err = 0;
9116 pf->fd_atr_cnt = 0;
9117 /* if ATR was auto disabled it can be re-enabled. */
9118 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
9119 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
9120 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9121 (I40E_DEBUG_FD & pf->hw.debug_mask))
9122 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
9123 }
9124 }
9125 return need_reset;
9126 }
9127
9128 /**
9129 * i40e_clear_rss_lut - clear the rx hash lookup table
9130 * @vsi: the VSI being configured
9131 **/
9132 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
9133 {
9134 struct i40e_pf *pf = vsi->back;
9135 struct i40e_hw *hw = &pf->hw;
9136 u16 vf_id = vsi->vf_id;
9137 u8 i;
9138
9139 if (vsi->type == I40E_VSI_MAIN) {
9140 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
9141 wr32(hw, I40E_PFQF_HLUT(i), 0);
9142 } else if (vsi->type == I40E_VSI_SRIOV) {
9143 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
9144 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
9145 } else {
9146 dev_err(&pf->pdev->dev, "Cannot clear RSS LUT - invalid VSI type\n");
9147 }
9148 }
9149
9150 /**
9151 * i40e_set_features - set the netdev feature flags
9152 * @netdev: ptr to the netdev being adjusted
9153 * @features: the feature set that the stack is suggesting
9154 * Note: expects to be called while under rtnl_lock()
9155 **/
9156 static int i40e_set_features(struct net_device *netdev,
9157 netdev_features_t features)
9158 {
9159 struct i40e_netdev_priv *np = netdev_priv(netdev);
9160 struct i40e_vsi *vsi = np->vsi;
9161 struct i40e_pf *pf = vsi->back;
9162 bool need_reset;
9163
9164 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
9165 i40e_pf_config_rss(pf);
9166 else if (!(features & NETIF_F_RXHASH) &&
9167 netdev->features & NETIF_F_RXHASH)
9168 i40e_clear_rss_lut(vsi);
9169
9170 if (features & NETIF_F_HW_VLAN_CTAG_RX)
9171 i40e_vlan_stripping_enable(vsi);
9172 else
9173 i40e_vlan_stripping_disable(vsi);
9174
9175 need_reset = i40e_set_ntuple(pf, features);
9176
9177 if (need_reset)
9178 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
9179
9180 return 0;
9181 }
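/* Illustrative call path (simplified, an assumption about the core):
 * "ethtool -K eth0 ntuple on" makes the networking core call, under
 * rtnl_lock(),
 *
 *	netdev->netdev_ops->ndo_set_features(netdev, features);
 *
 * which lands in i40e_set_features() above; a NETIF_F_NTUPLE
 * transition then triggers the PF reset via i40e_do_reset().
 */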
9182
9183 /**
9184 * i40e_get_udp_port_idx - Look up a possibly offloaded UDP port for Rx
9185 * @pf: board private structure
9186 * @port: The UDP port to look up
9187 *
9188 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
9189 **/
9190 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
9191 {
9192 u8 i;
9193
9194 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9195 if (pf->udp_ports[i].port == port)
9196 return i;
9197 }
9198
9199 return i;
9200 }
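/* The same helper doubles as a free-slot finder, since an unused entry
 * has port == 0 -- a sketch of both uses:
 *
 *	u8 idx = i40e_get_udp_port_idx(pf, port);	// existing entry?
 *	u8 free = i40e_get_udp_port_idx(pf, 0);		// first empty slot
 *
 * i40e_udp_tunnel_add() below relies on exactly this trick.
 */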
9201
9202 /**
9203 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
9204 * @netdev: This physical port's netdev
9205 * @ti: Tunnel endpoint information
9206 **/
9207 static void i40e_udp_tunnel_add(struct net_device *netdev,
9208 struct udp_tunnel_info *ti)
9209 {
9210 struct i40e_netdev_priv *np = netdev_priv(netdev);
9211 struct i40e_vsi *vsi = np->vsi;
9212 struct i40e_pf *pf = vsi->back;
9213 u16 port = ntohs(ti->port);
9214 u8 next_idx;
9215 u8 idx;
9216
9217 idx = i40e_get_udp_port_idx(pf, port);
9218
9219 /* Check if port already exists */
9220 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
9221 netdev_info(netdev, "port %d already offloaded\n", port);
9222 return;
9223 }
9224
9225 /* Now check if there is space to add the new port */
9226 next_idx = i40e_get_udp_port_idx(pf, 0);
9227
9228 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
9229 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
9230 port);
9231 return;
9232 }
9233
9234 switch (ti->type) {
9235 case UDP_TUNNEL_TYPE_VXLAN:
9236 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
9237 break;
9238 case UDP_TUNNEL_TYPE_GENEVE:
9239 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
9240 return;
9241 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
9242 break;
9243 default:
9244 return;
9245 }
9246
9247 /* New port: add it and mark its index in the bitmap */
9248 pf->udp_ports[next_idx].port = port;
9249 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
9250 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
9251 }
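/* Note that nothing is sent to the admin queue here: marking the index
 * in pending_udp_bitmap and raising I40E_FLAG_UDP_FILTER_SYNC defers
 * the hardware update to the service task, which batches port adds and
 * deletes (in this driver, i40e_sync_udp_filters_subtask()).
 */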
9252
9253 /**
9254 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
9255 * @netdev: This physical port's netdev
9256 * @ti: Tunnel endpoint information
9257 **/
9258 static void i40e_udp_tunnel_del(struct net_device *netdev,
9259 struct udp_tunnel_info *ti)
9260 {
9261 struct i40e_netdev_priv *np = netdev_priv(netdev);
9262 struct i40e_vsi *vsi = np->vsi;
9263 struct i40e_pf *pf = vsi->back;
9264 u16 port = ntohs(ti->port);
9265 u8 idx;
9266
9267 idx = i40e_get_udp_port_idx(pf, port);
9268
9269 /* Check if port already exists */
9270 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
9271 goto not_found;
9272
9273 switch (ti->type) {
9274 case UDP_TUNNEL_TYPE_VXLAN:
9275 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
9276 goto not_found;
9277 break;
9278 case UDP_TUNNEL_TYPE_GENEVE:
9279 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
9280 goto not_found;
9281 break;
9282 default:
9283 goto not_found;
9284 }
9285
9286 /* if port exists, set it to 0 (mark for deletion)
9287 * and make it pending
9288 */
9289 pf->udp_ports[idx].port = 0;
9290 pf->pending_udp_bitmap |= BIT_ULL(idx);
9291 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
9292
9293 return;
9294 not_found:
9295 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
9296 port);
9297 }
9298
9299 static int i40e_get_phys_port_id(struct net_device *netdev,
9300 struct netdev_phys_item_id *ppid)
9301 {
9302 struct i40e_netdev_priv *np = netdev_priv(netdev);
9303 struct i40e_pf *pf = np->vsi->back;
9304 struct i40e_hw *hw = &pf->hw;
9305
9306 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
9307 return -EOPNOTSUPP;
9308
9309 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
9310 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
9311
9312 return 0;
9313 }
9314
9315 /**
9316 * i40e_ndo_fdb_add - add an entry to the hardware database
9317 * @ndm: the input from the stack
9318 * @tb: pointer to array of nladdr (unused)
9319 * @dev: the net device pointer
9320 * @addr: the MAC address entry being added
 * @vid: VLAN ID; must be 0, as VLANs are not yet supported here
9321 * @flags: instructions from stack about fdb operation
9322 */
9323 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9324 struct net_device *dev,
9325 const unsigned char *addr, u16 vid,
9326 u16 flags)
9327 {
9328 struct i40e_netdev_priv *np = netdev_priv(dev);
9329 struct i40e_pf *pf = np->vsi->back;
9330 int err = 0;
9331
9332 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
9333 return -EOPNOTSUPP;
9334
9335 if (vid) {
9336 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
9337 return -EINVAL;
9338 }
9339
9340 /* Hardware does not support aging addresses, so if an
9341 * ndm_state is given, only allow permanent addresses
9342 */
9343 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
9344 netdev_info(dev, "FDB only supports static addresses\n");
9345 return -EINVAL;
9346 }
9347
9348 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
9349 err = dev_uc_add_excl(dev, addr);
9350 else if (is_multicast_ether_addr(addr))
9351 err = dev_mc_add_excl(dev, addr);
9352 else
9353 err = -EINVAL;
9354
9355 /* Only return duplicate errors if NLM_F_EXCL is set */
9356 if (err == -EEXIST && !(flags & NLM_F_EXCL))
9357 err = 0;
9358
9359 return err;
9360 }
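/* Illustrative usage (an assumption about the admin side): a static
 * entry added with iproute2, e.g.
 *
 *	bridge fdb add 00:11:22:33:44:55 dev eth0
 *
 * reaches this handler through the rtnetlink ndo_fdb_add path; note
 * the handler rejects any non-zero VLAN id and non-permanent state.
 */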
9361
9362 /**
9363 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
9364 * @dev: the netdev being configured
9365 * @nlh: RTNL message
9366 *
9367 * Inserts a new hardware bridge if not already created and
9368 * enables the bridging mode requested (VEB or VEPA). If the
9369 * hardware bridge has already been inserted and the request
9370 * is to change the mode then that requires a PF reset to
9371 * allow rebuild of the components with required hardware
9372 * bridge mode enabled.
9373 *
9374 * Note: expects to be called while under rtnl_lock()
9375 **/
9376 static int i40e_ndo_bridge_setlink(struct net_device *dev,
9377 struct nlmsghdr *nlh,
9378 u16 flags)
9379 {
9380 struct i40e_netdev_priv *np = netdev_priv(dev);
9381 struct i40e_vsi *vsi = np->vsi;
9382 struct i40e_pf *pf = vsi->back;
9383 struct i40e_veb *veb = NULL;
9384 struct nlattr *attr, *br_spec;
9385 int i, rem;
9386
9387 /* Only for PF VSI for now */
9388 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9389 return -EOPNOTSUPP;
9390
9391 /* Find the HW bridge for PF VSI */
9392 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9393 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9394 veb = pf->veb[i];
9395 }
9396
9397 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9398
9399 nla_for_each_nested(attr, br_spec, rem) {
9400 __u16 mode;
9401
9402 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9403 continue;
9404
9405 mode = nla_get_u16(attr);
9406 if ((mode != BRIDGE_MODE_VEPA) &&
9407 (mode != BRIDGE_MODE_VEB))
9408 return -EINVAL;
9409
9410 /* Insert a new HW bridge */
9411 if (!veb) {
9412 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9413 vsi->tc_config.enabled_tc);
9414 if (veb) {
9415 veb->bridge_mode = mode;
9416 i40e_config_bridge_mode(veb);
9417 } else {
9418 /* No Bridge HW offload available */
9419 return -ENOENT;
9420 }
9421 break;
9422 } else if (mode != veb->bridge_mode) {
9423 /* Existing HW bridge but different mode needs reset */
9424 veb->bridge_mode = mode;
9425 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
9426 if (mode == BRIDGE_MODE_VEB)
9427 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
9428 else
9429 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9430 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
9431 true);
9432 break;
9433 }
9434 }
9435
9436 return 0;
9437 }
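/* Illustrative usage (an assumption about the admin side): switching
 * the embedded bridge with iproute2, e.g.
 *
 *	bridge link set dev eth0 hwmode veb
 *
 * arrives here as an IFLA_BRIDGE_MODE attribute; changing an existing
 * bridge's mode costs a full PF reset, as handled above.
 */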
9438
9439 /**
9440 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
9441 * @skb: skb buff
9442 * @pid: process id
9443 * @seq: RTNL message seq #
9444 * @dev: the netdev being configured
9445 * @filter_mask: unused
9446 * @nlflags: netlink flags passed in
9447 *
9448 * Return the mode in which the hardware bridge is operating,
9449 * i.e. VEB or VEPA.
9450 **/
9451 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9452 struct net_device *dev,
9453 u32 __always_unused filter_mask,
9454 int nlflags)
9455 {
9456 struct i40e_netdev_priv *np = netdev_priv(dev);
9457 struct i40e_vsi *vsi = np->vsi;
9458 struct i40e_pf *pf = vsi->back;
9459 struct i40e_veb *veb = NULL;
9460 int i;
9461
9462 /* Only for PF VSI for now */
9463 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9464 return -EOPNOTSUPP;
9465
9466 /* Find the HW bridge for the PF VSI */
9467 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9468 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9469 veb = pf->veb[i];
9470 }
9471
9472 if (!veb)
9473 return 0;
9474
9475 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
9476 0, 0, nlflags, filter_mask, NULL);
9477 }
9478
9479 /**
9480 * i40e_features_check - Validate encapsulated packet conforms to limits
9481 * @skb: skb buff
9482 * @dev: This physical port's netdev
9483 * @features: Offload features that the stack believes apply
9484 **/
9485 static netdev_features_t i40e_features_check(struct sk_buff *skb,
9486 struct net_device *dev,
9487 netdev_features_t features)
9488 {
9489 size_t len;
9490
9491 /* No point in doing any of this if neither checksum nor GSO is
9492 * being requested for this frame. We can rule out both by just
9493 * checking for CHECKSUM_PARTIAL
9494 */
9495 if (skb->ip_summed != CHECKSUM_PARTIAL)
9496 return features;
9497
9498 /* We cannot support GSO if the MSS is going to be less than
9499 * 64 bytes. If it is then we need to drop support for GSO.
9500 */
9501 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
9502 features &= ~NETIF_F_GSO_MASK;
9503
9504 /* MACLEN can support at most 63 words */
9505 len = skb_network_header(skb) - skb->data;
9506 if (len & ~(63 * 2))
9507 goto out_err;
9508
9509 /* IPLEN and EIPLEN can support at most 127 dwords */
9510 len = skb_transport_header(skb) - skb_network_header(skb);
9511 if (len & ~(127 * 4))
9512 goto out_err;
9513
9514 if (skb->encapsulation) {
9515 /* L4TUNLEN can support at most 127 words */
9516 len = skb_inner_network_header(skb) - skb_transport_header(skb);
9517 if (len & ~(127 * 2))
9518 goto out_err;
9519
9520 /* IPLEN can support at most 127 dwords */
9521 len = skb_inner_transport_header(skb) -
9522 skb_inner_network_header(skb);
9523 if (len & ~(127 * 4))
9524 goto out_err;
9525 }
9526
9527 /* No need to validate L4LEN as TCP is the only protocol with a
9528 * flexible value, and we support all possible values supported
9529 * by TCP, which is at most 15 dwords
9530 */
9531
9532 return features;
9533 out_err:
9534 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9535 }
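/* A note on the mask checks above: each header-length field counts
 * fixed-size words, so "len & ~(63 * 2)" passes only when len is even
 * and at most 126 bytes -- e.g. a 14-byte Ethernet header plus two
 * VLAN tags (22 bytes total) passes, while 127 bytes, or any odd
 * length, sets a bit outside the mask and fails the check.
 */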
9536
9537 /**
9538 * i40e_xdp_setup - add/remove an XDP program
9539 * @vsi: VSI to changed
9540 * @prog: XDP program
9541 **/
9542 static int i40e_xdp_setup(struct i40e_vsi *vsi,
9543 struct bpf_prog *prog)
9544 {
9545 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
9546 struct i40e_pf *pf = vsi->back;
9547 struct bpf_prog *old_prog;
9548 bool need_reset;
9549 int i;
9550
9551 /* Don't allow frames that span over multiple buffers */
9552 if (frame_size > vsi->rx_buf_len)
9553 return -EINVAL;
9554
9555 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
9556 return 0;
9557
9558 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
9559 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
9560
9561 if (need_reset)
9562 i40e_prep_for_reset(pf, true);
9563
9564 old_prog = xchg(&vsi->xdp_prog, prog);
9565
9566 if (need_reset)
9567 i40e_reset_and_rebuild(pf, true, true);
9568
9569 for (i = 0; i < vsi->num_queue_pairs; i++)
9570 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
9571
9572 if (old_prog)
9573 bpf_prog_put(old_prog);
9574
9575 return 0;
9576 }
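/* Illustrative flow (simplified, an assumption about the core): loading
 * a program with iproute2, e.g.
 *
 *	ip link set dev eth0 xdp obj prog.o
 *
 * makes the core invoke ndo_xdp with XDP_SETUP_PROG, landing in
 * i40e_xdp_setup() above; the old program is only released with
 * bpf_prog_put() after the rings have been rebuilt around the new one.
 */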
9577
9578 /**
9579 * i40e_xdp - implements ndo_xdp for i40e
9580 * @dev: netdevice
9581 * @xdp: XDP command
9582 **/
9583 static int i40e_xdp(struct net_device *dev,
9584 struct netdev_xdp *xdp)
9585 {
9586 struct i40e_netdev_priv *np = netdev_priv(dev);
9587 struct i40e_vsi *vsi = np->vsi;
9588
9589 if (vsi->type != I40E_VSI_MAIN)
9590 return -EINVAL;
9591
9592 switch (xdp->command) {
9593 case XDP_SETUP_PROG:
9594 return i40e_xdp_setup(vsi, xdp->prog);
9595 case XDP_QUERY_PROG:
9596 xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
9597 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
9598 return 0;
9599 default:
9600 return -EINVAL;
9601 }
9602 }
9603
9604 static const struct net_device_ops i40e_netdev_ops = {
9605 .ndo_open = i40e_open,
9606 .ndo_stop = i40e_close,
9607 .ndo_start_xmit = i40e_lan_xmit_frame,
9608 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
9609 .ndo_set_rx_mode = i40e_set_rx_mode,
9610 .ndo_validate_addr = eth_validate_addr,
9611 .ndo_set_mac_address = i40e_set_mac,
9612 .ndo_change_mtu = i40e_change_mtu,
9613 .ndo_do_ioctl = i40e_ioctl,
9614 .ndo_tx_timeout = i40e_tx_timeout,
9615 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
9616 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
9617 #ifdef CONFIG_NET_POLL_CONTROLLER
9618 .ndo_poll_controller = i40e_netpoll,
9619 #endif
9620 .ndo_setup_tc = __i40e_setup_tc,
9621 .ndo_set_features = i40e_set_features,
9622 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
9623 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
9624 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
9625 .ndo_get_vf_config = i40e_ndo_get_vf_config,
9626 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
9627 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
9628 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
9629 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
9630 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
9631 .ndo_get_phys_port_id = i40e_get_phys_port_id,
9632 .ndo_fdb_add = i40e_ndo_fdb_add,
9633 .ndo_features_check = i40e_features_check,
9634 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
9635 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
9636 .ndo_xdp = i40e_xdp,
9637 };
9638
9639 /**
9640 * i40e_config_netdev - Setup the netdev flags
9641 * @vsi: the VSI being configured
9642 *
9643 * Returns 0 on success, negative value on failure
9644 **/
9645 static int i40e_config_netdev(struct i40e_vsi *vsi)
9646 {
9647 struct i40e_pf *pf = vsi->back;
9648 struct i40e_hw *hw = &pf->hw;
9649 struct i40e_netdev_priv *np;
9650 struct net_device *netdev;
9651 u8 broadcast[ETH_ALEN];
9652 u8 mac_addr[ETH_ALEN];
9653 int etherdev_size;
9654 netdev_features_t hw_enc_features;
9655 netdev_features_t hw_features;
9656
9657 etherdev_size = sizeof(struct i40e_netdev_priv);
9658 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
9659 if (!netdev)
9660 return -ENOMEM;
9661
9662 vsi->netdev = netdev;
9663 np = netdev_priv(netdev);
9664 np->vsi = vsi;
9665
9666 hw_enc_features = NETIF_F_SG |
9667 NETIF_F_IP_CSUM |
9668 NETIF_F_IPV6_CSUM |
9669 NETIF_F_HIGHDMA |
9670 NETIF_F_SOFT_FEATURES |
9671 NETIF_F_TSO |
9672 NETIF_F_TSO_ECN |
9673 NETIF_F_TSO6 |
9674 NETIF_F_GSO_GRE |
9675 NETIF_F_GSO_GRE_CSUM |
9676 NETIF_F_GSO_PARTIAL |
9677 NETIF_F_GSO_UDP_TUNNEL |
9678 NETIF_F_GSO_UDP_TUNNEL_CSUM |
9679 NETIF_F_SCTP_CRC |
9680 NETIF_F_RXHASH |
9681 NETIF_F_RXCSUM |
9682 0;
9683
9684 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
9685 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
9686
9687 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
9688
9689 netdev->hw_enc_features |= hw_enc_features;
9690
9691 /* record features VLANs can make use of */
9692 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
9693
9694 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
9695 netdev->hw_features |= NETIF_F_NTUPLE;
9696 hw_features = hw_enc_features |
9697 NETIF_F_HW_VLAN_CTAG_TX |
9698 NETIF_F_HW_VLAN_CTAG_RX;
9699
9700 netdev->hw_features |= hw_features;
9701
9702 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
9703 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
9704
9705 if (vsi->type == I40E_VSI_MAIN) {
9706 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
9707 ether_addr_copy(mac_addr, hw->mac.perm_addr);
9708 /* The following steps are necessary for two reasons. First,
9709 * some older NVM configurations load a default MAC-VLAN
9710 * filter that will accept any tagged packet, and we want to
9711 * replace this with a normal filter. Additionally, it is
9712 * possible our MAC address was provided by the platform using
9713 * Open Firmware or similar.
9714 *
9715 * Thus, we need to remove the default filter and install one
9716 * specific to the MAC address.
9717 */
9718 i40e_rm_default_mac_filter(vsi, mac_addr);
9719 spin_lock_bh(&vsi->mac_filter_hash_lock);
9720 i40e_add_mac_filter(vsi, mac_addr);
9721 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9722 } else {
9723 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
9724 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
9725 * the end, which is 4 bytes long, so force truncation of the
9726 * original name by IFNAMSIZ - 4
9727 */
9728 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
9729 IFNAMSIZ - 4,
9730 pf->vsi[pf->lan_vsi]->netdev->name);
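/* e.g. with a LAN netdev named "eth0" the template above becomes
 * "eth0v%d", which register_netdev() later expands to eth0v0,
 * eth0v1, ... for the VMDq netdevs.
 */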
9731 random_ether_addr(mac_addr);
9732
9733 spin_lock_bh(&vsi->mac_filter_hash_lock);
9734 i40e_add_mac_filter(vsi, mac_addr);
9735 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9736 }
9737
9738 /* Add the broadcast filter so that we initially will receive
9739 * broadcast packets. Note that when a new VLAN is first added the
9740 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
9741 * specific filters as part of transitioning into "vlan" operation.
9742 * When more VLANs are added, the driver will copy each existing MAC
9743 * filter and add it for the new VLAN.
9744 *
9745 * Broadcast filters are handled specially by
9746 * i40e_sync_filters_subtask, as the driver must set the broadcast
9747 * promiscuous bit instead of adding this directly as a MAC/VLAN
9748 * filter. The subtask will update the correct broadcast promiscuous
9749 * bits as VLANs become active or inactive.
9750 */
9751 eth_broadcast_addr(broadcast);
9752 spin_lock_bh(&vsi->mac_filter_hash_lock);
9753 i40e_add_mac_filter(vsi, broadcast);
9754 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9755
9756 ether_addr_copy(netdev->dev_addr, mac_addr);
9757 ether_addr_copy(netdev->perm_addr, mac_addr);
9758
9759 netdev->priv_flags |= IFF_UNICAST_FLT;
9760 netdev->priv_flags |= IFF_SUPP_NOFCS;
9761 /* Setup netdev TC information */
9762 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
9763
9764 netdev->netdev_ops = &i40e_netdev_ops;
9765 netdev->watchdog_timeo = 5 * HZ;
9766 i40e_set_ethtool_ops(netdev);
9767
9768 /* MTU range: 68 - 9706 */
9769 netdev->min_mtu = ETH_MIN_MTU;
9770 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
9771
9772 return 0;
9773 }
9774
9775 /**
9776 * i40e_vsi_delete - Delete a VSI from the switch
9777 * @vsi: the VSI being removed
9780 **/
9781 static void i40e_vsi_delete(struct i40e_vsi *vsi)
9782 {
9783 /* removing the default VSI is not allowed */
9784 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
9785 return;
9786
9787 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
9788 }
9789
9790 /**
9791 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
9792 * @vsi: the VSI being queried
9793 *
9794 * Returns 1 if the HW bridge mode is VEB, 0 for VEPA mode, and negative on error
9795 **/
9796 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9797 {
9798 struct i40e_veb *veb;
9799 struct i40e_pf *pf = vsi->back;
9800
9801 /* Uplink is not a bridge so default to VEB */
9802 if (vsi->veb_idx == I40E_NO_VEB)
9803 return 1;
9804
9805 veb = pf->veb[vsi->veb_idx];
9806 if (!veb) {
9807 dev_info(&pf->pdev->dev,
9808 "There is no veb associated with the bridge\n");
9809 return -ENOENT;
9810 }
9811
9812 /* Uplink is a bridge in VEPA mode */
9813 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
9814 return 0;
9815
9816 /* Uplink is a bridge in VEB mode */
9817 return 1;
9822 }
9823
9824 /**
9825 * i40e_add_vsi - Add a VSI to the switch
9826 * @vsi: the VSI being configured
9827 *
9828 * This initializes a VSI context depending on the VSI type to be added and
9829 * passes it down to the add_vsi aq command.
9830 **/
9831 static int i40e_add_vsi(struct i40e_vsi *vsi)
9832 {
9833 int ret = -ENODEV;
9834 struct i40e_pf *pf = vsi->back;
9835 struct i40e_hw *hw = &pf->hw;
9836 struct i40e_vsi_context ctxt;
9837 struct i40e_mac_filter *f;
9838 struct hlist_node *h;
9839 int bkt;
9840
9841 u8 enabled_tc = 0x1; /* TC0 enabled */
9842 int f_count = 0;
9843
9844 memset(&ctxt, 0, sizeof(ctxt));
9845 switch (vsi->type) {
9846 case I40E_VSI_MAIN:
9847 /* The PF's main VSI is already set up as part of the
9848 * device initialization, so we'll not bother with
9849 * the add_vsi call, but we will retrieve the current
9850 * VSI context.
9851 */
9852 ctxt.seid = pf->main_vsi_seid;
9853 ctxt.pf_num = pf->hw.pf_id;
9854 ctxt.vf_num = 0;
9855 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9856 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9857 if (ret) {
9858 dev_info(&pf->pdev->dev,
9859 "couldn't get PF vsi config, err %s aq_err %s\n",
9860 i40e_stat_str(&pf->hw, ret),
9861 i40e_aq_str(&pf->hw,
9862 pf->hw.aq.asq_last_status));
9863 return -ENOENT;
9864 }
9865 vsi->info = ctxt.info;
9866 vsi->info.valid_sections = 0;
9867
9868 vsi->seid = ctxt.seid;
9869 vsi->id = ctxt.vsi_number;
9870
9871 enabled_tc = i40e_pf_get_tc_map(pf);
9872
9873 /* In MFP mode, set up the queue map and update the VSI */
9874 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
9875 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
9876 memset(&ctxt, 0, sizeof(ctxt));
9877 ctxt.seid = pf->main_vsi_seid;
9878 ctxt.pf_num = pf->hw.pf_id;
9879 ctxt.vf_num = 0;
9880 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
9881 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9882 if (ret) {
9883 dev_info(&pf->pdev->dev,
9884 "update vsi failed, err %s aq_err %s\n",
9885 i40e_stat_str(&pf->hw, ret),
9886 i40e_aq_str(&pf->hw,
9887 pf->hw.aq.asq_last_status));
9888 ret = -ENOENT;
9889 goto err;
9890 }
9891 /* update the local VSI info queue map */
9892 i40e_vsi_update_queue_map(vsi, &ctxt);
9893 vsi->info.valid_sections = 0;
9894 } else {
9895 /* Default/Main VSI is only enabled for TC0;
9896 * reconfigure it to enable all TCs that are
9897 * available on the port in SFP mode.
9898 * For the MFP case the iSCSI PF would use this
9899 * flow to enable LAN+iSCSI TC.
9900 */
9901 ret = i40e_vsi_config_tc(vsi, enabled_tc);
9902 if (ret) {
9903 /* A single-TC condition is not fatal;
9904 * log a message and continue
9905 */
9906 dev_info(&pf->pdev->dev,
9907 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
9908 enabled_tc,
9909 i40e_stat_str(&pf->hw, ret),
9910 i40e_aq_str(&pf->hw,
9911 pf->hw.aq.asq_last_status));
9912 }
9913 }
9914 break;
9915
9916 case I40E_VSI_FDIR:
9917 ctxt.pf_num = hw->pf_id;
9918 ctxt.vf_num = 0;
9919 ctxt.uplink_seid = vsi->uplink_seid;
9920 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9921 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9922 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
9923 (i40e_is_vsi_uplink_mode_veb(vsi))) {
9924 ctxt.info.valid_sections |=
9925 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9926 ctxt.info.switch_id =
9927 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9928 }
9929 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9930 break;
9931
9932 case I40E_VSI_VMDQ2:
9933 ctxt.pf_num = hw->pf_id;
9934 ctxt.vf_num = 0;
9935 ctxt.uplink_seid = vsi->uplink_seid;
9936 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9937 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
9938
9939 /* This VSI is connected to VEB so the switch_id
9940 * should be set to zero by default.
9941 */
9942 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9943 ctxt.info.valid_sections |=
9944 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9945 ctxt.info.switch_id =
9946 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9947 }
9948
9949 /* Setup the VSI tx/rx queue map for TC0 only for now */
9950 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9951 break;
9952
9953 case I40E_VSI_SRIOV:
9954 ctxt.pf_num = hw->pf_id;
9955 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
9956 ctxt.uplink_seid = vsi->uplink_seid;
9957 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9958 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
9959
9960 /* This VSI is connected to VEB so the switch_id
9961 * should be set to zero by default.
9962 */
9963 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9964 ctxt.info.valid_sections |=
9965 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9966 ctxt.info.switch_id =
9967 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9968 }
9969
9970 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
9971 ctxt.info.valid_sections |=
9972 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
9973 ctxt.info.queueing_opt_flags |=
9974 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
9975 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
9976 }
9977
9978 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
9979 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
9980 if (pf->vf[vsi->vf_id].spoofchk) {
9981 ctxt.info.valid_sections |=
9982 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
9983 ctxt.info.sec_flags |=
9984 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
9985 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
9986 }
9987 /* Setup the VSI tx/rx queue map for TC0 only for now */
9988 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9989 break;
9990
9991 case I40E_VSI_IWARP:
9992 /* send down message to iWARP */
9993 break;
9994
9995 default:
9996 return -ENODEV;
9997 }
9998
9999 if (vsi->type != I40E_VSI_MAIN) {
10000 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
10001 if (ret) {
10002 dev_info(&vsi->back->pdev->dev,
10003 "add vsi failed, err %s aq_err %s\n",
10004 i40e_stat_str(&pf->hw, ret),
10005 i40e_aq_str(&pf->hw,
10006 pf->hw.aq.asq_last_status));
10007 ret = -ENOENT;
10008 goto err;
10009 }
10010 vsi->info = ctxt.info;
10011 vsi->info.valid_sections = 0;
10012 vsi->seid = ctxt.seid;
10013 vsi->id = ctxt.vsi_number;
10014 }
10015
10016 vsi->active_filters = 0;
10017 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
10018 spin_lock_bh(&vsi->mac_filter_hash_lock);
10019 /* If macvlan filters already exist, force them to get loaded */
10020 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
10021 f->state = I40E_FILTER_NEW;
10022 f_count++;
10023 }
10024 spin_unlock_bh(&vsi->mac_filter_hash_lock);
10025
10026 if (f_count) {
10027 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
10028 pf->flags |= I40E_FLAG_FILTER_SYNC;
10029 }
10030
10031 /* Update VSI BW information */
10032 ret = i40e_vsi_get_bw_info(vsi);
10033 if (ret) {
10034 dev_info(&pf->pdev->dev,
10035 "couldn't get vsi bw info, err %s aq_err %s\n",
10036 i40e_stat_str(&pf->hw, ret),
10037 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10038 /* VSI is already added so not tearing that up */
10039 ret = 0;
10040 }
10041
10042 err:
10043 return ret;
10044 }
10045
10046 /**
10047 * i40e_vsi_release - Delete a VSI and free its resources
10048 * @vsi: the VSI being removed
10049 *
10050 * Returns 0 on success or < 0 on error
10051 **/
10052 int i40e_vsi_release(struct i40e_vsi *vsi)
10053 {
10054 struct i40e_mac_filter *f;
10055 struct hlist_node *h;
10056 struct i40e_veb *veb = NULL;
10057 struct i40e_pf *pf;
10058 u16 uplink_seid;
10059 int i, n, bkt;
10060
10061 pf = vsi->back;
10062
10063 /* release of a VEB-owner or last VSI is not allowed */
10064 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
10065 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
10066 vsi->seid, vsi->uplink_seid);
10067 return -ENODEV;
10068 }
10069 if (vsi == pf->vsi[pf->lan_vsi] &&
10070 !test_bit(__I40E_DOWN, pf->state)) {
10071 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
10072 return -ENODEV;
10073 }
10074
10075 uplink_seid = vsi->uplink_seid;
10076 if (vsi->type != I40E_VSI_SRIOV) {
10077 if (vsi->netdev_registered) {
10078 vsi->netdev_registered = false;
10079 if (vsi->netdev) {
10080 /* results in a call to i40e_close() */
10081 unregister_netdev(vsi->netdev);
10082 }
10083 } else {
10084 i40e_vsi_close(vsi);
10085 }
10086 i40e_vsi_disable_irq(vsi);
10087 }
10088
10089 spin_lock_bh(&vsi->mac_filter_hash_lock);
10090
10091 /* clear the sync flag on all filters */
10092 if (vsi->netdev) {
10093 __dev_uc_unsync(vsi->netdev, NULL);
10094 __dev_mc_unsync(vsi->netdev, NULL);
10095 }
10096
10097 /* make sure any remaining filters are marked for deletion */
10098 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
10099 __i40e_del_filter(vsi, f);
10100
10101 spin_unlock_bh(&vsi->mac_filter_hash_lock);
10102
10103 i40e_sync_vsi_filters(vsi);
10104
10105 i40e_vsi_delete(vsi);
10106 i40e_vsi_free_q_vectors(vsi);
10107 if (vsi->netdev) {
10108 free_netdev(vsi->netdev);
10109 vsi->netdev = NULL;
10110 }
10111 i40e_vsi_clear_rings(vsi);
10112 i40e_vsi_clear(vsi);
10113
10114 /* If this was the last thing on the VEB, except for the
10115 * controlling VSI, remove the VEB, which puts the controlling
10116 * VSI onto the next level down in the switch.
10117 *
10118 * Well, okay, there's one more exception here: don't remove
10119 * the orphan VEBs yet. We'll wait for an explicit remove request
10120 * from up the network stack.
10121 */
10122 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
10123 if (pf->vsi[i] &&
10124 pf->vsi[i]->uplink_seid == uplink_seid &&
10125 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
10126 n++; /* count the VSIs */
10127 }
10128 }
10129 for (i = 0; i < I40E_MAX_VEB; i++) {
10130 if (!pf->veb[i])
10131 continue;
10132 if (pf->veb[i]->uplink_seid == uplink_seid)
10133 n++; /* count the VEBs */
10134 if (pf->veb[i]->seid == uplink_seid)
10135 veb = pf->veb[i];
10136 }
10137 if (n == 0 && veb && veb->uplink_seid != 0)
10138 i40e_veb_release(veb);
10139
10140 return 0;
10141 }
10142
10143 /**
10144 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
10145 * @vsi: ptr to the VSI
10146 *
10147 * This should only be called after i40e_vsi_mem_alloc() which allocates the
10148 * corresponding SW VSI structure and initializes num_queue_pairs for the
10149 * newly allocated VSI.
10150 *
10151 * Returns 0 on success or negative on failure
10152 **/
10153 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
10154 {
10155 int ret = -ENOENT;
10156 struct i40e_pf *pf = vsi->back;
10157
10158 if (vsi->q_vectors[0]) {
10159 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
10160 vsi->seid);
10161 return -EEXIST;
10162 }
10163
10164 if (vsi->base_vector) {
10165 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
10166 vsi->seid, vsi->base_vector);
10167 return -EEXIST;
10168 }
10169
10170 ret = i40e_vsi_alloc_q_vectors(vsi);
10171 if (ret) {
10172 dev_info(&pf->pdev->dev,
10173 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
10174 vsi->num_q_vectors, vsi->seid, ret);
10175 vsi->num_q_vectors = 0;
10176 goto vector_setup_out;
10177 }
10178
10179 /* In Legacy mode, we do not have to get any other vector since we
10180 * piggyback on the misc/ICR0 for queue interrupts.
10181 */
10182 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10183 return ret;
10184 if (vsi->num_q_vectors)
10185 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
10186 vsi->num_q_vectors, vsi->idx);
10187 if (vsi->base_vector < 0) {
10188 dev_info(&pf->pdev->dev,
10189 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
10190 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
10191 i40e_vsi_free_q_vectors(vsi);
10192 ret = -ENOENT;
10193 goto vector_setup_out;
10194 }
10195
10196 vector_setup_out:
10197 return ret;
10198 }
10199
10200 /**
10201 * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
10202 * @vsi: pointer to the vsi.
10203 *
10204 * This re-allocates a vsi's queue resources.
10205 *
10206 * Returns pointer to the successfully allocated and configured VSI sw struct
10207 * on success, otherwise returns NULL on failure.
10208 **/
10209 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
10210 {
10211 u16 alloc_queue_pairs;
10212 struct i40e_pf *pf;
10213 u8 enabled_tc;
10214 int ret;
10215
10216 if (!vsi)
10217 return NULL;
10218
10219 pf = vsi->back;
10220
10221 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10222 i40e_vsi_clear_rings(vsi);
10223
10224 i40e_vsi_free_arrays(vsi, false);
10225 i40e_set_num_rings_in_vsi(vsi);
10226 ret = i40e_vsi_alloc_arrays(vsi, false);
10227 if (ret)
10228 goto err_vsi;
10229
10230 alloc_queue_pairs = vsi->alloc_queue_pairs *
10231 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
10232
10233 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
10234 if (ret < 0) {
10235 dev_info(&pf->pdev->dev,
10236 "failed to get tracking for %d queues for VSI %d err %d\n",
10237 alloc_queue_pairs, vsi->seid, ret);
10238 goto err_vsi;
10239 }
10240 vsi->base_queue = ret;
10241
10242 /* Update the FW view of the VSI. Force a reset of TC and queue
10243 * layout configurations.
10244 */
10245 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
10246 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
10247 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
10248 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
10249 if (vsi->type == I40E_VSI_MAIN)
10250 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
10251
10252 /* assign it some queues */
10253 ret = i40e_alloc_rings(vsi);
10254 if (ret)
10255 goto err_rings;
10256
10257 /* map all of the rings to the q_vectors */
10258 i40e_vsi_map_rings_to_vectors(vsi);
10259 return vsi;
10260
10261 err_rings:
10262 i40e_vsi_free_q_vectors(vsi);
10263 if (vsi->netdev_registered) {
10264 vsi->netdev_registered = false;
10265 unregister_netdev(vsi->netdev);
10266 free_netdev(vsi->netdev);
10267 vsi->netdev = NULL;
10268 }
10269 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
10270 err_vsi:
10271 i40e_vsi_clear(vsi);
10272 return NULL;
10273 }
10274
10275 /**
10276 * i40e_vsi_setup - Set up a VSI by a given type
10277 * @pf: board private structure
10278 * @type: VSI type
10279 * @uplink_seid: the switch element to link to
10280 * @param1: usage depends upon VSI type. For VF types, indicates VF id
10281 *
10282 * This allocates the sw VSI structure and its queue resources, then adds the VSI
10283 * to the identified VEB.
10284 *
10285 * Returns pointer to the successfully allocated and configured VSI sw struct on
10286 * success, otherwise returns NULL on failure.
10287 **/
10288 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
10289 u16 uplink_seid, u32 param1)
10290 {
10291 struct i40e_vsi *vsi = NULL;
10292 struct i40e_veb *veb = NULL;
10293 u16 alloc_queue_pairs;
10294 int ret, i;
10295 int v_idx;
10296
10297 /* The requested uplink_seid must be either
10298 * - the PF's port seid
10299 * no VEB is needed because this is the PF
10300 * or this is a Flow Director special case VSI
10301 * - seid of an existing VEB
10302 * - seid of a VSI that owns an existing VEB
10303 * - seid of a VSI that doesn't own a VEB
10304 * a new VEB is created and the VSI becomes the owner
10305 * - seid of the PF VSI, which is what creates the first VEB
10306 * this is a special case of the previous
10307 *
10308 * Find which uplink_seid we were given and create a new VEB if needed
10309 */
10310 for (i = 0; i < I40E_MAX_VEB; i++) {
10311 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
10312 veb = pf->veb[i];
10313 break;
10314 }
10315 }
10316
10317 if (!veb && uplink_seid != pf->mac_seid) {
10318
10319 for (i = 0; i < pf->num_alloc_vsi; i++) {
10320 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
10321 vsi = pf->vsi[i];
10322 break;
10323 }
10324 }
10325 if (!vsi) {
10326 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
10327 uplink_seid);
10328 return NULL;
10329 }
10330
10331 if (vsi->uplink_seid == pf->mac_seid)
10332 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
10333 vsi->tc_config.enabled_tc);
10334 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
10335 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
10336 vsi->tc_config.enabled_tc);
10337 if (veb) {
10338 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
10339 dev_info(&vsi->back->pdev->dev,
10340 "New VSI creation error, uplink seid of LAN VSI expected.\n");
10341 return NULL;
10342 }
10343 /* We come up by default in VEPA mode if SRIOV is not
10344 * already enabled, in which case we can't force VEPA
10345 * mode.
10346 */
10347 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
10348 veb->bridge_mode = BRIDGE_MODE_VEPA;
10349 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
10350 }
10351 i40e_config_bridge_mode(veb);
10352 }
10353 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
10354 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
10355 veb = pf->veb[i];
10356 }
10357 if (!veb) {
10358 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
10359 return NULL;
10360 }
10361
10362 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
10363 uplink_seid = veb->seid;
10364 }
10365
10366 /* get vsi sw struct */
10367 v_idx = i40e_vsi_mem_alloc(pf, type);
10368 if (v_idx < 0)
10369 goto err_alloc;
10370 vsi = pf->vsi[v_idx];
10371 if (!vsi)
10372 goto err_alloc;
10373 vsi->type = type;
10374 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
10375
10376 if (type == I40E_VSI_MAIN)
10377 pf->lan_vsi = v_idx;
10378 else if (type == I40E_VSI_SRIOV)
10379 vsi->vf_id = param1;
10380 /* assign it some queues */
10381 alloc_queue_pairs = vsi->alloc_queue_pairs *
10382 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
10383
10384 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
10385 if (ret < 0) {
10386 dev_info(&pf->pdev->dev,
10387 "failed to get tracking for %d queues for VSI %d err=%d\n",
10388 alloc_queue_pairs, vsi->seid, ret);
10389 goto err_vsi;
10390 }
10391 vsi->base_queue = ret;
10392
10393 /* get a VSI from the hardware */
10394 vsi->uplink_seid = uplink_seid;
10395 ret = i40e_add_vsi(vsi);
10396 if (ret)
10397 goto err_vsi;
10398
10399 switch (vsi->type) {
10400 /* setup the netdev if needed */
10401 case I40E_VSI_MAIN:
10402 case I40E_VSI_VMDQ2:
10403 ret = i40e_config_netdev(vsi);
10404 if (ret)
10405 goto err_netdev;
10406 ret = register_netdev(vsi->netdev);
10407 if (ret)
10408 goto err_netdev;
10409 vsi->netdev_registered = true;
10410 netif_carrier_off(vsi->netdev);
10411 #ifdef CONFIG_I40E_DCB
10412 /* Setup DCB netlink interface */
10413 i40e_dcbnl_setup(vsi);
10414 #endif /* CONFIG_I40E_DCB */
10415 /* fall through */
10416
10417 case I40E_VSI_FDIR:
10418 /* set up vectors and rings if needed */
10419 ret = i40e_vsi_setup_vectors(vsi);
10420 if (ret)
10421 goto err_msix;
10422
10423 ret = i40e_alloc_rings(vsi);
10424 if (ret)
10425 goto err_rings;
10426
10427 /* map all of the rings to the q_vectors */
10428 i40e_vsi_map_rings_to_vectors(vsi);
10429
10430 i40e_vsi_reset_stats(vsi);
10431 break;
10432
10433 default:
10434 /* no netdev or rings for the other VSI types */
10435 break;
10436 }
10437
10438 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
10439 (vsi->type == I40E_VSI_VMDQ2)) {
10440 ret = i40e_vsi_config_rss(vsi);
10441 }
10442 return vsi;
10443
10444 err_rings:
10445 i40e_vsi_free_q_vectors(vsi);
10446 err_msix:
10447 if (vsi->netdev_registered) {
10448 vsi->netdev_registered = false;
10449 unregister_netdev(vsi->netdev);
10450 free_netdev(vsi->netdev);
10451 vsi->netdev = NULL;
10452 }
10453 err_netdev:
10454 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
10455 err_vsi:
10456 i40e_vsi_clear(vsi);
10457 err_alloc:
10458 return NULL;
10459 }
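/* A sketch of a typical call (matching how the driver creates its
 * sideband Flow Director VSI elsewhere in this file):
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
 *			     pf->vsi[pf->lan_vsi]->seid, 0);
 *	if (!vsi)
 *		// creation failed; no partial state to unwind
 *
 * param1 is only meaningful for I40E_VSI_SRIOV, where it carries the
 * VF id.
 */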
10460
10461 /**
10462 * i40e_veb_get_bw_info - Query VEB BW information
10463 * @veb: the veb to query
10464 *
10465 * Query the Tx scheduler BW configuration data for given VEB
10466 **/
10467 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
10468 {
10469 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
10470 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
10471 struct i40e_pf *pf = veb->pf;
10472 struct i40e_hw *hw = &pf->hw;
10473 u32 tc_bw_max;
10474 int ret = 0;
10475 int i;
10476
10477 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10478 &bw_data, NULL);
10479 if (ret) {
10480 dev_info(&pf->pdev->dev,
10481 "query veb bw config failed, err %s aq_err %s\n",
10482 i40e_stat_str(&pf->hw, ret),
10483 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
10484 goto out;
10485 }
10486
10487 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10488 &ets_data, NULL);
10489 if (ret) {
10490 dev_info(&pf->pdev->dev,
10491 "query veb bw ets config failed, err %s aq_err %s\n",
10492 i40e_stat_str(&pf->hw, ret),
10493 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
10494 goto out;
10495 }
10496
10497 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
10498 veb->bw_max_quanta = ets_data.tc_bw_max;
10499 veb->is_abs_credits = bw_data.absolute_credits_enable;
10500 veb->enabled_tc = ets_data.tc_valid_bits;
10501 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
10502 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
10503 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10504 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
10505 veb->bw_tc_limit_credits[i] =
10506 le16_to_cpu(bw_data.tc_bw_limits[i]);
10507 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
10508 }
10509
10510 out:
10511 return ret;
10512 }
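/* Unpacking note for tc_bw_max above: the two little-endian 16-bit
 * words are fused into one 32-bit value holding eight 4-bit fields,
 * one per traffic class; TC3's max quanta, for instance, lives at
 * (tc_bw_max >> 12) & 0x7.
 */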
10513
10514 /**
10515 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
10516 * @pf: board private structure
10517 *
10518 * On error: returns error code (negative)
10519 * On success: returns vsi index in PF (positive)
10520 **/
10521 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
10522 {
10523 int ret = -ENOENT;
10524 struct i40e_veb *veb;
10525 int i;
10526
10527 /* Need to protect the allocation of switch elements at the PF level */
10528 mutex_lock(&pf->switch_mutex);
10529
10530 /* VEB list may be fragmented if VEB creation/destruction has
10531 * been happening. We can afford to do a quick scan to look
10532 * for any free slots in the list.
10533 *
10534 * find next empty veb slot, looping back around if necessary
10535 */
10536 i = 0;
10537 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
10538 i++;
10539 if (i >= I40E_MAX_VEB) {
10540 ret = -ENOMEM;
10541 goto err_alloc_veb; /* out of VEB slots! */
10542 }
10543
10544 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
10545 if (!veb) {
10546 ret = -ENOMEM;
10547 goto err_alloc_veb;
10548 }
10549 veb->pf = pf;
10550 veb->idx = i;
10551 veb->enabled_tc = 1;
10552
10553 pf->veb[i] = veb;
10554 ret = i;
10555 err_alloc_veb:
10556 mutex_unlock(&pf->switch_mutex);
10557 return ret;
10558 }
10559
10560 /**
10561 * i40e_switch_branch_release - Delete a branch of the switch tree
10562 * @branch: where to start deleting
10563 *
10564 * This uses recursion to find the tips of the branch to be
10565 * removed, deleting until we get back to, and can delete, this VEB.
10566 **/
10567 static void i40e_switch_branch_release(struct i40e_veb *branch)
10568 {
10569 struct i40e_pf *pf = branch->pf;
10570 u16 branch_seid = branch->seid;
10571 u16 veb_idx = branch->idx;
10572 int i;
10573
10574 /* release any VEBs on this VEB - RECURSION */
10575 for (i = 0; i < I40E_MAX_VEB; i++) {
10576 if (!pf->veb[i])
10577 continue;
10578 if (pf->veb[i]->uplink_seid == branch->seid)
10579 i40e_switch_branch_release(pf->veb[i]);
10580 }
10581
10582 /* Release the VSIs on this VEB, but not the owner VSI.
10583 *
10584 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
10585 * the VEB itself, so don't use (*branch) after this loop.
10586 */
10587 for (i = 0; i < pf->num_alloc_vsi; i++) {
10588 if (!pf->vsi[i])
10589 continue;
10590 if (pf->vsi[i]->uplink_seid == branch_seid &&
10591 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
10592 i40e_vsi_release(pf->vsi[i]);
10593 }
10594 }
10595
10596 /* There's one corner case where the VEB might not have been
10597 * removed, so double check it here and remove it if needed.
10598 * This case happens if the veb was created from the debugfs
10599 * commands and no VSIs were added to it.
10600 */
10601 if (pf->veb[veb_idx])
10602 i40e_veb_release(pf->veb[veb_idx]);
10603 }
10604
10605 /**
10606 * i40e_veb_clear - remove veb struct
10607 * @veb: the veb to remove
10608 **/
10609 static void i40e_veb_clear(struct i40e_veb *veb)
10610 {
10611 if (!veb)
10612 return;
10613
10614 if (veb->pf) {
10615 struct i40e_pf *pf = veb->pf;
10616
10617 mutex_lock(&pf->switch_mutex);
10618 if (pf->veb[veb->idx] == veb)
10619 pf->veb[veb->idx] = NULL;
10620 mutex_unlock(&pf->switch_mutex);
10621 }
10622
10623 kfree(veb);
10624 }
10625
10626 /**
10627 * i40e_veb_release - Delete a VEB and free its resources
10628 * @veb: the VEB being removed
10629 **/
10630 void i40e_veb_release(struct i40e_veb *veb)
10631 {
10632 struct i40e_vsi *vsi = NULL;
10633 struct i40e_pf *pf;
10634 int i, n = 0;
10635
10636 pf = veb->pf;
10637
10638 /* find the remaining VSI and check for extras */
10639 for (i = 0; i < pf->num_alloc_vsi; i++) {
10640 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
10641 n++;
10642 vsi = pf->vsi[i];
10643 }
10644 }
10645 if (n != 1) {
10646 dev_info(&pf->pdev->dev,
10647 "can't remove VEB %d with %d VSIs left\n",
10648 veb->seid, n);
10649 return;
10650 }
10651
10652 /* move the remaining VSI to uplink veb */
10653 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
10654 if (veb->uplink_seid) {
10655 vsi->uplink_seid = veb->uplink_seid;
10656 if (veb->uplink_seid == pf->mac_seid)
10657 vsi->veb_idx = I40E_NO_VEB;
10658 else
10659 vsi->veb_idx = veb->veb_idx;
10660 } else {
10661 /* floating VEB */
10662 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10663 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
10664 }
10665
10666 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10667 i40e_veb_clear(veb);
10668 }
10669
10670 /**
10671 * i40e_add_veb - create the VEB in the switch
10672 * @veb: the VEB to be instantiated
10673 * @vsi: the controlling VSI
10674 **/
10675 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
10676 {
10677 struct i40e_pf *pf = veb->pf;
10678 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
10679 int ret;
10680
10681 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
10682 veb->enabled_tc, false,
10683 &veb->seid, enable_stats, NULL);
10684
10685 /* get a VEB from the hardware */
10686 if (ret) {
10687 dev_info(&pf->pdev->dev,
10688 "couldn't add VEB, err %s aq_err %s\n",
10689 i40e_stat_str(&pf->hw, ret),
10690 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10691 return -EPERM;
10692 }
10693
10694 /* get statistics counter */
10695 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
10696 &veb->stats_idx, NULL, NULL, NULL);
10697 if (ret) {
10698 dev_info(&pf->pdev->dev,
10699 "couldn't get VEB statistics idx, err %s aq_err %s\n",
10700 i40e_stat_str(&pf->hw, ret),
10701 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10702 return -EPERM;
10703 }
10704 ret = i40e_veb_get_bw_info(veb);
10705 if (ret) {
10706 dev_info(&pf->pdev->dev,
10707 "couldn't get VEB bw info, err %s aq_err %s\n",
10708 i40e_stat_str(&pf->hw, ret),
10709 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10710 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10711 return -ENOENT;
10712 }
10713
10714 vsi->uplink_seid = veb->seid;
10715 vsi->veb_idx = veb->idx;
10716 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
10717
10718 return 0;
10719 }
10720
10721 /**
10722 * i40e_veb_setup - Set up a VEB
10723 * @pf: board private structure
10724 * @flags: VEB setup flags
10725 * @uplink_seid: the switch element to link to
10726 * @vsi_seid: the initial VSI seid
10727 * @enabled_tc: Enabled TC bit-map
10728 *
10729 * This allocates the sw VEB structure and links it into the switch.
10730 * It is possible and legal for this to be a duplicate of an already
10731 * existing VEB. It is also possible for both uplink and vsi seids
10732 * to be zero, in order to create a floating VEB.
10733 *
10734 * Returns pointer to the successfully allocated VEB sw struct on
10735 * success, otherwise returns NULL on failure.
10736 **/
10737 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
10738 u16 uplink_seid, u16 vsi_seid,
10739 u8 enabled_tc)
10740 {
10741 struct i40e_veb *veb, *uplink_veb = NULL;
10742 int vsi_idx, veb_idx;
10743 int ret;
10744
10745 /* if one seid is 0, the other must be 0 to create a floating relay */
10746 if ((uplink_seid == 0 || vsi_seid == 0) &&
10747 (uplink_seid + vsi_seid != 0)) {
10748 dev_info(&pf->pdev->dev,
10749 "one, not both seid's are 0: uplink=%d vsi=%d\n",
10750 uplink_seid, vsi_seid);
10751 return NULL;
10752 }
10753
10754 /* make sure there is such a vsi and uplink */
10755 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
10756 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
10757 break;
10758 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
10759 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
10760 vsi_seid);
10761 return NULL;
10762 }
10763
10764 if (uplink_seid && uplink_seid != pf->mac_seid) {
10765 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10766 if (pf->veb[veb_idx] &&
10767 pf->veb[veb_idx]->seid == uplink_seid) {
10768 uplink_veb = pf->veb[veb_idx];
10769 break;
10770 }
10771 }
10772 if (!uplink_veb) {
10773 dev_info(&pf->pdev->dev,
10774 "uplink seid %d not found\n", uplink_seid);
10775 return NULL;
10776 }
10777 }
10778
10779 /* get veb sw struct */
10780 veb_idx = i40e_veb_mem_alloc(pf);
10781 if (veb_idx < 0)
10782 goto err_alloc;
10783 veb = pf->veb[veb_idx];
10784 veb->flags = flags;
10785 veb->uplink_seid = uplink_seid;
10786 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
10787 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
10788
10789 /* create the VEB in the switch */
10790 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
10791 if (ret)
10792 goto err_veb;
10793 if (vsi_idx == pf->lan_vsi)
10794 pf->lan_veb = veb->idx;
10795
10796 return veb;
10797
10798 err_veb:
10799 i40e_veb_clear(veb);
10800 err_alloc:
10801 return NULL;
10802 }
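/* Illustrative calls (a sketch, not from the driver): a floating relay
 * is requested by passing zero for both seids,
 *
 *	veb = i40e_veb_setup(pf, 0, 0, 0, 0);
 *
 * while the common case hangs a VEB off the MAC uplink for an existing
 * VSI, as i40e_vsi_setup() does above.
 */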
10803
10804 /**
10805 * i40e_setup_pf_switch_element - set PF vars based on switch type
10806 * @pf: board private structure
10807 * @ele: element we are building info from
10808 * @num_reported: total number of elements
10809 * @printconfig: should we print the contents
10810 *
10811 * helper function to assist in extracting a few useful SEID values.
10812 **/
10813 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
10814 struct i40e_aqc_switch_config_element_resp *ele,
10815 u16 num_reported, bool printconfig)
10816 {
10817 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
10818 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
10819 u8 element_type = ele->element_type;
10820 u16 seid = le16_to_cpu(ele->seid);
10821
10822 if (printconfig)
10823 dev_info(&pf->pdev->dev,
10824 "type=%d seid=%d uplink=%d downlink=%d\n",
10825 element_type, seid, uplink_seid, downlink_seid);
10826
10827 switch (element_type) {
10828 case I40E_SWITCH_ELEMENT_TYPE_MAC:
10829 pf->mac_seid = seid;
10830 break;
10831 case I40E_SWITCH_ELEMENT_TYPE_VEB:
10832 /* Main VEB? */
10833 if (uplink_seid != pf->mac_seid)
10834 break;
10835 if (pf->lan_veb == I40E_NO_VEB) {
10836 int v;
10837
10838 /* find existing or else empty VEB */
10839 for (v = 0; v < I40E_MAX_VEB; v++) {
10840 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
10841 pf->lan_veb = v;
10842 break;
10843 }
10844 }
10845 if (pf->lan_veb == I40E_NO_VEB) {
10846 v = i40e_veb_mem_alloc(pf);
10847 if (v < 0)
10848 break;
10849 pf->lan_veb = v;
10850 }
10851 }
10852
10853 pf->veb[pf->lan_veb]->seid = seid;
10854 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
10855 pf->veb[pf->lan_veb]->pf = pf;
10856 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
10857 break;
10858 case I40E_SWITCH_ELEMENT_TYPE_VSI:
10859 if (num_reported != 1)
10860 break;
10861 /* This is immediately after a reset so we can assume this is
10862 * the PF's VSI
10863 */
10864 pf->mac_seid = uplink_seid;
10865 pf->pf_seid = downlink_seid;
10866 pf->main_vsi_seid = seid;
10867 if (printconfig)
10868 dev_info(&pf->pdev->dev,
10869 "pf_seid=%d main_vsi_seid=%d\n",
10870 pf->pf_seid, pf->main_vsi_seid);
10871 break;
10872 case I40E_SWITCH_ELEMENT_TYPE_PF:
10873 case I40E_SWITCH_ELEMENT_TYPE_VF:
10874 case I40E_SWITCH_ELEMENT_TYPE_EMP:
10875 case I40E_SWITCH_ELEMENT_TYPE_BMC:
10876 case I40E_SWITCH_ELEMENT_TYPE_PE:
10877 case I40E_SWITCH_ELEMENT_TYPE_PA:
10878 /* ignore these for now */
10879 break;
10880 default:
10881 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
10882 element_type, seid);
10883 break;
10884 }
10885 }
10886
10887 /**
10888 * i40e_fetch_switch_configuration - Get switch config from firmware
10889 * @pf: board private structure
10890 * @printconfig: should we print the contents
10891 *
10892 * Get the current switch configuration from the device and
10893 * extract a few useful SEID values.
10894 **/
10895 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10896 {
10897 struct i40e_aqc_get_switch_config_resp *sw_config;
10898 u16 next_seid = 0;
10899 int ret = 0;
10900 u8 *aq_buf;
10901 int i;
10902
10903 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10904 if (!aq_buf)
10905 return -ENOMEM;
10906
10907 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
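/* the firmware returns the switch configuration in chunks; next_seid
 * is the continuation cookie, and a value of zero after a call means
 * the final chunk has been read
 */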
10908 do {
10909 u16 num_reported, num_total;
10910
10911 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10912 I40E_AQ_LARGE_BUF,
10913 &next_seid, NULL);
10914 if (ret) {
10915 dev_info(&pf->pdev->dev,
10916 "get switch config failed err %s aq_err %s\n",
10917 i40e_stat_str(&pf->hw, ret),
10918 i40e_aq_str(&pf->hw,
10919 pf->hw.aq.asq_last_status));
10920 kfree(aq_buf);
10921 return -ENOENT;
10922 }
10923
10924 num_reported = le16_to_cpu(sw_config->header.num_reported);
10925 num_total = le16_to_cpu(sw_config->header.num_total);
10926
10927 if (printconfig)
10928 dev_info(&pf->pdev->dev,
10929 "header: %d reported %d total\n",
10930 num_reported, num_total);
10931
10932 for (i = 0; i < num_reported; i++) {
10933 struct i40e_aqc_switch_config_element_resp *ele =
10934 &sw_config->element[i];
10935
10936 i40e_setup_pf_switch_element(pf, ele, num_reported,
10937 printconfig);
10938 }
10939 } while (next_seid != 0);
10940
10941 kfree(aq_buf);
10942 return ret;
10943 }
10944
10945 /**
10946 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
10947 * @pf: board private structure
10948 * @reinit: if the Main VSI needs to be re-initialized.
10949 *
10950 * Returns 0 on success, negative value on failure
10951 **/
10952 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
10953 {
10954 u16 flags = 0;
10955 int ret;
10956
10957 /* find out what's out there already */
10958 ret = i40e_fetch_switch_configuration(pf, false);
10959 if (ret) {
10960 dev_info(&pf->pdev->dev,
10961 "couldn't fetch switch config, err %s aq_err %s\n",
10962 i40e_stat_str(&pf->hw, ret),
10963 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10964 return ret;
10965 }
10966 i40e_pf_reset_stats(pf);
10967
10968 /* Set the switch config bit for the whole device to
10969 * support limited promiscuous or true promiscuous mode
10970 * when the user requests promiscuous mode. The default
10971 * is limited promiscuous mode.
10972 */
10974 if ((pf->hw.pf_id == 0) &&
10975 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
10976 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10977
10978 if (pf->hw.pf_id == 0) {
10979 u16 valid_flags;
10980
10981 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10982 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
10983 NULL);
10984 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
10985 dev_info(&pf->pdev->dev,
10986 "couldn't set switch config bits, err %s aq_err %s\n",
10987 i40e_stat_str(&pf->hw, ret),
10988 i40e_aq_str(&pf->hw,
10989 pf->hw.aq.asq_last_status));
10990 /* not a fatal problem, just keep going */
10991 }
10992 }
10993
10994 /* first time setup */
10995 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
10996 struct i40e_vsi *vsi = NULL;
10997 u16 uplink_seid;
10998
10999 /* Set up the PF VSI associated with the PF's main VSI
11000 * that is already in the HW switch
11001 */
11002 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
11003 uplink_seid = pf->veb[pf->lan_veb]->seid;
11004 else
11005 uplink_seid = pf->mac_seid;
11006 if (pf->lan_vsi == I40E_NO_VSI)
11007 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
11008 else if (reinit)
11009 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
11010 if (!vsi) {
11011 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
11012 i40e_fdir_teardown(pf);
11013 return -EAGAIN;
11014 }
11015 } else {
11016 /* force a reset of TC and queue layout configurations */
11017 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
11018
11019 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
11020 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
11021 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
11022 }
11023 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
11024
11025 i40e_fdir_sb_setup(pf);
11026
11027 /* Setup static PF queue filter control settings */
11028 ret = i40e_setup_pf_filter_control(pf);
11029 if (ret) {
11030 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
11031 ret);
11032 /* Failure here should not stop continuing other steps */
11033 }
11034
11035 /* enable RSS in the HW, even for only one queue, as the stack can use
11036 * the hash
11037 */
11038 if (pf->flags & I40E_FLAG_RSS_ENABLED)
11039 i40e_pf_config_rss(pf);
11040
11041 /* fill in link information and enable LSE reporting */
11042 i40e_link_event(pf);
11043
11044 /* Initialize user-specific link properties */
11045 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
11046 I40E_AQ_AN_COMPLETED) ? true : false);
11047
11048 i40e_ptp_init(pf);
11049
11050 /* repopulate tunnel port filters */
11051 i40e_sync_udp_filters(pf);
11052
11053 return ret;
11054 }
11055
11056 /**
11057 * i40e_determine_queue_usage - Work out queue distribution
11058 * @pf: board private structure
11059 **/
11060 static void i40e_determine_queue_usage(struct i40e_pf *pf)
11061 {
11062 int queues_left;
11063
11064 pf->num_lan_qps = 0;
11065
11066 /* Find the max queues to be put into basic use. We'll always be
11067 * using TC0, whether or not DCB is running, and TC0 will get the
11068 * big RSS set.
11069 */
11070 queues_left = pf->hw.func_caps.num_tx_qp;
11071
11072 if ((queues_left == 1) ||
11073 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
11074 /* one qp for PF, no queues for anything else */
11075 queues_left = 0;
11076 pf->alloc_rss_size = pf->num_lan_qps = 1;
11077
11078 /* make sure all the fancies are disabled */
11079 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
11080 I40E_FLAG_IWARP_ENABLED |
11081 I40E_FLAG_FD_SB_ENABLED |
11082 I40E_FLAG_FD_ATR_ENABLED |
11083 I40E_FLAG_DCB_CAPABLE |
11084 I40E_FLAG_DCB_ENABLED |
11085 I40E_FLAG_SRIOV_ENABLED |
11086 I40E_FLAG_VMDQ_ENABLED);
11087 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
11088 I40E_FLAG_FD_SB_ENABLED |
11089 I40E_FLAG_FD_ATR_ENABLED |
11090 I40E_FLAG_DCB_CAPABLE))) {
11091 /* one qp for PF */
11092 pf->alloc_rss_size = pf->num_lan_qps = 1;
11093 queues_left -= pf->num_lan_qps;
11094
11095 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
11096 I40E_FLAG_IWARP_ENABLED |
11097 I40E_FLAG_FD_SB_ENABLED |
11098 I40E_FLAG_FD_ATR_ENABLED |
11099 I40E_FLAG_DCB_ENABLED |
11100 I40E_FLAG_VMDQ_ENABLED);
11101 } else {
11102 /* Not enough queues for all TCs */
11103 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
11104 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
11105 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
11106 I40E_FLAG_DCB_ENABLED);
11107 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
11108 }
11109 pf->num_lan_qps = max_t(int, pf->rss_size_max,
11110 num_online_cpus());
11111 pf->num_lan_qps = min_t(int, pf->num_lan_qps,
11112 pf->hw.func_caps.num_tx_qp);
11113
11114 queues_left -= pf->num_lan_qps;
11115 }
11116
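/* Worked example with hypothetical numbers: given 128 Tx queues,
 * 8 online CPUs and rss_size_max = 64, the LAN VSI is sized to 64
 * queue pairs above; one queue is then reserved for Flow Director
 * below, and the remaining 63 are divided among the VF and VMDq
 * consumers.
 */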
11117 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11118 if (queues_left > 1) {
11119 queues_left -= 1; /* save 1 queue for FD */
11120 } else {
11121 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11122 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
11123 }
11124 }
11125
11126 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11127 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
11128 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
11129 (queues_left / pf->num_vf_qps));
11130 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
11131 }
11132
11133 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11134 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
11135 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
11136 (queues_left / pf->num_vmdq_qps));
11137 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
11138 }
11139
11140 pf->queues_left = queues_left;
11141 dev_dbg(&pf->pdev->dev,
11142 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
11143 pf->hw.func_caps.num_tx_qp,
11144 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
11145 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
11146 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
11147 queues_left);
11148 }
11149
11150 /**
11151 * i40e_setup_pf_filter_control - Setup PF static filter control
11152 * @pf: PF to be set up
11153 *
11154 * i40e_setup_pf_filter_control sets up a PF's initial filter control
11155 * settings. If PE/FCoE are enabled then it will also set the per-PF
11156 * based filter sizes required for them. It also enables Flow Director,
11157 * ethertype and MACVLAN filter settings for the PF.
11158 *
11159 * Returns 0 on success, negative on failure
11160 **/
11161 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
11162 {
11163 struct i40e_filter_control_settings *settings = &pf->filter_settings;
11164
11165 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
11166
11167 /* Flow Director is enabled */
11168 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
11169 settings->enable_fdir = true;
11170
11171 /* Ethtype and MACVLAN filters enabled for PF */
11172 settings->enable_ethtype = true;
11173 settings->enable_macvlan = true;
11174
11175 if (i40e_set_filter_control(&pf->hw, settings))
11176 return -ENOENT;
11177
11178 return 0;
11179 }
11180
11181 #define INFO_STRING_LEN 255
11182 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
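/* i accumulates the number of characters written so far, and
 * REMAIN(i) bounds each subsequent snprintf; the WARN_ON at the end
 * of the function catches any truncation overshoot
 */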
11183 static void i40e_print_features(struct i40e_pf *pf)
11184 {
11185 struct i40e_hw *hw = &pf->hw;
11186 char *buf;
11187 int i;
11188
11189 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
11190 if (!buf)
11191 return;
11192
11193 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
11194 #ifdef CONFIG_PCI_IOV
11195 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
11196 #endif
11197 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
11198 pf->hw.func_caps.num_vsis,
11199 pf->vsi[pf->lan_vsi]->num_queue_pairs);
11200 if (pf->flags & I40E_FLAG_RSS_ENABLED)
11201 i += snprintf(&buf[i], REMAIN(i), " RSS");
11202 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
11203 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
11204 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11205 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
11206 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
11207 }
11208 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
11209 i += snprintf(&buf[i], REMAIN(i), " DCB");
11210 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
11211 i += snprintf(&buf[i], REMAIN(i), " Geneve");
11212 if (pf->flags & I40E_FLAG_PTP)
11213 i += snprintf(&buf[i], REMAIN(i), " PTP");
11214 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
11215 i += snprintf(&buf[i], REMAIN(i), " VEB");
11216 else
11217 i += snprintf(&buf[i], REMAIN(i), " VEPA");
11218
11219 dev_info(&pf->pdev->dev, "%s\n", buf);
11220 kfree(buf);
11221 WARN_ON(i > INFO_STRING_LEN);
11222 }
11223
11224 /**
11225 * i40e_get_platform_mac_addr - get platform-specific MAC address
11226 * @pdev: PCI device information struct
11227 * @pf: board private structure
11228 *
11229 * Look up the MAC address for the device. First we'll try
11230 * eth_platform_get_mac_address, which checks Open Firmware and then any
11231 * arch-specific fallback. Otherwise, we default to the value stored in
11232 * firmware.
11233 **/
11234 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
11235 {
11236 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
11237 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
11238 }
11239
11240 /**
11241 * i40e_probe - Device initialization routine
11242 * @pdev: PCI device information struct
11243 * @ent: entry in i40e_pci_tbl
11244 *
11245 * i40e_probe initializes a PF identified by a pci_dev structure.
11246 * The OS initialization, configuring of the PF private structure,
11247 * and a hardware reset occur.
11248 *
11249 * Returns 0 on success, negative on failure
11250 **/
11251 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11252 {
11253 struct i40e_aq_get_phy_abilities_resp abilities;
11254 struct i40e_pf *pf;
11255 struct i40e_hw *hw;
11256 static u16 pfs_found;
11257 u16 wol_nvm_bits;
11258 u16 link_status;
11259 int err;
11260 u32 val;
11261 u32 i;
11262 u8 set_fc_aq_fail;
11263
11264 err = pci_enable_device_mem(pdev);
11265 if (err)
11266 return err;
11267
11268 /* set up for high or low dma */
11269 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11270 if (err) {
11271 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11272 if (err) {
11273 dev_err(&pdev->dev,
11274 "DMA configuration failed: 0x%x\n", err);
11275 goto err_dma;
11276 }
11277 }
11278
11279 /* set up pci connections */
11280 err = pci_request_mem_regions(pdev, i40e_driver_name);
11281 if (err) {
11282 dev_info(&pdev->dev,
11283 "pci_request_selected_regions failed %d\n", err);
11284 goto err_pci_reg;
11285 }
11286
11287 pci_enable_pcie_error_reporting(pdev);
11288 pci_set_master(pdev);
11289
11290 /* Now that we have a PCI connection, we need to do the
11291 * low level device setup. This is primarily setting up
11292 * the Admin Queue structures and then querying for the
11293 * device's current profile information.
11294 */
11295 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
11296 if (!pf) {
11297 err = -ENOMEM;
11298 goto err_pf_alloc;
11299 }
11300 pf->next_vsi = 0;
11301 pf->pdev = pdev;
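/* keep the device marked down until probe has fully set it up */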
11302 set_bit(__I40E_DOWN, pf->state);
11303
11304 hw = &pf->hw;
11305 hw->back = pf;
11306
11307 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
11308 I40E_MAX_CSR_SPACE);
11309
11310 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
11311 if (!hw->hw_addr) {
11312 err = -EIO;
11313 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
11314 (unsigned int)pci_resource_start(pdev, 0),
11315 pf->ioremap_len, err);
11316 goto err_ioremap;
11317 }
11318 hw->vendor_id = pdev->vendor;
11319 hw->device_id = pdev->device;
11320 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
11321 hw->subsystem_vendor_id = pdev->subsystem_vendor;
11322 hw->subsystem_device_id = pdev->subsystem_device;
11323 hw->bus.device = PCI_SLOT(pdev->devfn);
11324 hw->bus.func = PCI_FUNC(pdev->devfn);
11325 hw->bus.bus_id = pdev->bus->number;
11326 pf->instance = pfs_found;
11327
11328 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
11329 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
11330
11331 /* set up the locks for the AQ, do this only once in probe
11332 * and destroy them only once in remove
11333 */
11334 mutex_init(&hw->aq.asq_mutex);
11335 mutex_init(&hw->aq.arq_mutex);
11336
11337 pf->msg_enable = netif_msg_init(debug,
11338 NETIF_MSG_DRV |
11339 NETIF_MSG_PROBE |
11340 NETIF_MSG_LINK);
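/* a debug module parameter below -1 is passed straight through as a
 * raw hardware debug mask rather than a netif message level
 */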
11341 if (debug < -1)
11342 pf->hw.debug_mask = debug;
11343
11344 /* do a special CORER for clearing PXE mode once at init */
11345 if (hw->revision_id == 0 &&
11346 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
11347 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
11348 i40e_flush(hw);
11349 msleep(200);
11350 pf->corer_count++;
11351
11352 i40e_clear_pxe_mode(hw);
11353 }
11354
11355 /* Reset here to make sure all is clean and to define PF 'n' */
11356 i40e_clear_hw(hw);
11357 err = i40e_pf_reset(hw);
11358 if (err) {
11359 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
11360 goto err_pf_reset;
11361 }
11362 pf->pfr_count++;
11363
11364 hw->aq.num_arq_entries = I40E_AQ_LEN;
11365 hw->aq.num_asq_entries = I40E_AQ_LEN;
11366 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
11367 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
11368 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
11369
11370 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
11371 "%s-%s:misc",
11372 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
11373
11374 err = i40e_init_shared_code(hw);
11375 if (err) {
11376 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
11377 err);
11378 goto err_pf_reset;
11379 }
11380
11381 /* set up a default setting for link flow control */
11382 pf->hw.fc.requested_mode = I40E_FC_NONE;
11383
11384 err = i40e_init_adminq(hw);
11385 if (err) {
11386 if (err == I40E_ERR_FIRMWARE_API_VERSION)
11387 dev_info(&pdev->dev,
11388 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
11389 else
11390 dev_info(&pdev->dev,
11391 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
11392
11393 goto err_pf_reset;
11394 }
11395 i40e_get_oem_version(hw);
11396
11397 /* provide nvm, fw, api versions */
11398 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
11399 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
11400 hw->aq.api_maj_ver, hw->aq.api_min_ver,
11401 i40e_nvm_version_str(hw));
11402
11403 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
11404 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
11405 dev_info(&pdev->dev,
11406 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
11407 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
11408 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
11409 dev_info(&pdev->dev,
11410 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
11411
11412 i40e_verify_eeprom(pf);
11413
11414 /* Rev 0 hardware was never productized */
11415 if (hw->revision_id < 1)
11416 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
11417
11418 i40e_clear_pxe_mode(hw);
11419 err = i40e_get_capabilities(pf);
11420 if (err)
11421 goto err_adminq_setup;
11422
11423 err = i40e_sw_init(pf);
11424 if (err) {
11425 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
11426 goto err_sw_init;
11427 }
11428
11429 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
11430 hw->func_caps.num_rx_qp, 0, 0);
11431 if (err) {
11432 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
11433 goto err_init_lan_hmc;
11434 }
11435
11436 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
11437 if (err) {
11438 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
11439 err = -ENOENT;
11440 goto err_configure_lan_hmc;
11441 }
11442
11443 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
11444 * Ignore the error return code because if LLDP was already disabled
11445 * via the hardware settings this call will fail
11446 */
11447 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
11448 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
11449 i40e_aq_stop_lldp(hw, true, NULL);
11450 }
11451
11452 /* allow a platform config to override the HW addr */
11453 i40e_get_platform_mac_addr(pdev, pf);
11454
11455 if (!is_valid_ether_addr(hw->mac.addr)) {
11456 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
11457 err = -EIO;
11458 goto err_mac_addr;
11459 }
11460 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
11461 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
11462 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
11463 if (is_valid_ether_addr(hw->mac.port_addr))
11464 pf->hw_features |= I40E_HW_PORT_ID_VALID;
11465
11466 pci_set_drvdata(pdev, pf);
11467 pci_save_state(pdev);
11468 #ifdef CONFIG_I40E_DCB
11469 err = i40e_init_pf_dcb(pf);
11470 if (err) {
11471 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
11472 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
11473 /* Continue without DCB enabled */
11474 }
11475 #endif /* CONFIG_I40E_DCB */
11476
11477 /* set up periodic task facility */
11478 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
11479 pf->service_timer_period = HZ;
11480
11481 INIT_WORK(&pf->service_task, i40e_service_task);
11482 clear_bit(__I40E_SERVICE_SCHED, pf->state);
11483
11484 /* NVM bit on means WoL disabled for the port */
11485 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
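/* likewise, only the first partition of a port may enable WoL */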
11486 if ((BIT(hw->port) & wol_nvm_bits) || hw->partition_id != 1)
11487 pf->wol_en = false;
11488 else
11489 pf->wol_en = true;
11490 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
11491
11492 /* set up the main switch operations */
11493 i40e_determine_queue_usage(pf);
11494 err = i40e_init_interrupt_scheme(pf);
11495 if (err)
11496 goto err_switch_setup;
11497
11498 /* The number of VSIs reported by the FW is the minimum guaranteed
11499 * to us; HW supports far more and we share the remaining pool with
11500 * the other PFs. We allocate space for more than the guarantee with
11501 * the understanding that we might not get them all later.
11502 */
11503 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
11504 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
11505 else
11506 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
11507
11508 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
11509 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
11510 GFP_KERNEL);
11511 if (!pf->vsi) {
11512 err = -ENOMEM;
11513 goto err_switch_setup;
11514 }
11515
11516 #ifdef CONFIG_PCI_IOV
11517 /* prep for VF support */
11518 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11519 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11520 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
11521 if (pci_num_vf(pdev))
11522 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11523 }
11524 #endif
11525 err = i40e_setup_pf_switch(pf, false);
11526 if (err) {
11527 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
11528 goto err_vsis;
11529 }
11530
11531 /* Make sure flow control is set according to current settings */
11532 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
11533 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
11534 dev_dbg(&pf->pdev->dev,
11535 "Set fc with err %s aq_err %s on get_phy_cap\n",
11536 i40e_stat_str(hw, err),
11537 i40e_aq_str(hw, hw->aq.asq_last_status));
11538 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
11539 dev_dbg(&pf->pdev->dev,
11540 "Set fc with err %s aq_err %s on set_phy_config\n",
11541 i40e_stat_str(hw, err),
11542 i40e_aq_str(hw, hw->aq.asq_last_status));
11543 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
11544 dev_dbg(&pf->pdev->dev,
11545 "Set fc with err %s aq_err %s on get_link_info\n",
11546 i40e_stat_str(hw, err),
11547 i40e_aq_str(hw, hw->aq.asq_last_status));
11548
11549 /* if FDIR VSI was set up, start it now */
11550 for (i = 0; i < pf->num_alloc_vsi; i++) {
11551 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
11552 i40e_vsi_open(pf->vsi[i]);
11553 break;
11554 }
11555 }
11556
11557 /* The driver only wants link up/down and module qualification
11558 * reports from firmware. Note the negative logic.
11559 */
11560 err = i40e_aq_set_phy_int_mask(&pf->hw,
11561 ~(I40E_AQ_EVENT_LINK_UPDOWN |
11562 I40E_AQ_EVENT_MEDIA_NA |
11563 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
11564 if (err)
11565 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
11566 i40e_stat_str(&pf->hw, err),
11567 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11568
11569 /* Reconfigure the hardware to allow a smaller minimum MSS with
11570 * TSO, so that a small-MSS TSO frame does not fire the MDD and
11571 * cause a reset.
11572 */
11573 val = rd32(hw, I40E_REG_MSS);
11574 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
11575 val &= ~I40E_REG_MSS_MIN_MASK;
11576 val |= I40E_64BYTE_MSS;
11577 wr32(hw, I40E_REG_MSS, val);
11578 }
11579
11580 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
11581 msleep(75);
11582 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
11583 if (err)
11584 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
11585 i40e_stat_str(&pf->hw, err),
11586 i40e_aq_str(&pf->hw,
11587 pf->hw.aq.asq_last_status));
11588 }
11589 /* The main driver is (mostly) up and happy. We need to set this state
11590 * before setting up the misc vector or we get a race and the vector
11591 * ends up disabled forever.
11592 */
11593 clear_bit(__I40E_DOWN, pf->state);
11594
11595 /* In case of MSI-X we are going to set up the misc vector right here
11596 * to handle admin queue events etc. In case of legacy and MSI
11597 * the misc functionality and queue processing are combined in
11598 * the same vector and that gets set up at open.
11599 */
11600 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11601 err = i40e_setup_misc_vector(pf);
11602 if (err) {
11603 dev_info(&pdev->dev,
11604 "setup of misc vector failed: %d\n", err);
11605 goto err_vsis;
11606 }
11607 }
11608
11609 #ifdef CONFIG_PCI_IOV
11610 /* prep for VF support */
11611 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11612 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11613 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
11614 /* disable link interrupts for VFs */
11615 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
11616 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
11617 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
11618 i40e_flush(hw);
11619
11620 if (pci_num_vf(pdev)) {
11621 dev_info(&pdev->dev,
11622 "Active VFs found, allocating resources.\n");
11623 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
11624 if (err)
11625 dev_info(&pdev->dev,
11626 "Error %d allocating resources for existing VFs\n",
11627 err);
11628 }
11629 }
11630 #endif /* CONFIG_PCI_IOV */
11631
11632 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11633 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
11634 pf->num_iwarp_msix,
11635 I40E_IWARP_IRQ_PILE_ID);
11636 if (pf->iwarp_base_vector < 0) {
11637 dev_info(&pdev->dev,
11638 "failed to get tracking for %d vectors for IWARP err=%d\n",
11639 pf->num_iwarp_msix, pf->iwarp_base_vector);
11640 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11641 }
11642 }
11643
11644 i40e_dbg_pf_init(pf);
11645
11646 /* tell the firmware that we're starting */
11647 i40e_send_version(pf);
11648
11649 /* since everything's happy, start the service_task timer */
11650 mod_timer(&pf->service_timer,
11651 round_jiffies(jiffies + pf->service_timer_period));
11652
11653 /* add this PF to client device list and launch a client service task */
11654 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11655 err = i40e_lan_add_device(pf);
11656 if (err)
11657 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
11658 err);
11659 }
11660
11661 #define PCI_SPEED_SIZE 8
11662 #define PCI_WIDTH_SIZE 8
11663 /* Devices on the IOSF bus do not have this information
11664 * and will report PCI Gen 1 x 1 by default so don't bother
11665 * checking them.
11666 */
11667 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
11668 char speed[PCI_SPEED_SIZE] = "Unknown";
11669 char width[PCI_WIDTH_SIZE] = "Unknown";
11670
11671 /* Get the negotiated link width and speed from PCI config
11672 * space
11673 */
11674 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
11675 &link_status);
11676
11677 i40e_set_pci_config_data(hw, link_status);
11678
11679 switch (hw->bus.speed) {
11680 case i40e_bus_speed_8000:
11681 strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
11682 case i40e_bus_speed_5000:
11683 strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
11684 case i40e_bus_speed_2500:
11685 strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
11686 default:
11687 break;
11688 }
11689 switch (hw->bus.width) {
11690 case i40e_bus_width_pcie_x8:
11691 strncpy(width, "8", PCI_WIDTH_SIZE); break;
11692 case i40e_bus_width_pcie_x4:
11693 strncpy(width, "4", PCI_WIDTH_SIZE); break;
11694 case i40e_bus_width_pcie_x2:
11695 strncpy(width, "2", PCI_WIDTH_SIZE); break;
11696 case i40e_bus_width_pcie_x1:
11697 strncpy(width, "1", PCI_WIDTH_SIZE); break;
11698 default:
11699 break;
11700 }
11701
11702 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
11703 speed, width);
11704
11705 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
11706 hw->bus.speed < i40e_bus_speed_8000) {
11707 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
11708 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
11709 }
11710 }
11711
11712 /* get the requested speeds from the fw */
11713 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
11714 if (err)
11715 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
11716 i40e_stat_str(&pf->hw, err),
11717 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11718 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
11719
11720 /* get the supported phy types from the fw */
11721 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
11722 if (err)
11723 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
11724 i40e_stat_str(&pf->hw, err),
11725 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11726
11727 /* Add a filter to drop all Flow control frames from any VSI from being
11728 * transmitted. By doing so we stop a malicious VF from sending out
11729 * PAUSE or PFC frames and potentially controlling traffic for other
11730 * PF/VF VSIs.
11731 * The FW can still send Flow control frames if enabled.
11732 */
11733 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11734 pf->main_vsi_seid);
11735
11736 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
11737 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
11738 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
11739 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
11740 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
11741 /* print a string summarizing features */
11742 i40e_print_features(pf);
11743
11744 return 0;
11745
11746 /* Unwind what we've done if something failed in the setup */
11747 err_vsis:
11748 set_bit(__I40E_DOWN, pf->state);
11749 i40e_clear_interrupt_scheme(pf);
11750 kfree(pf->vsi);
11751 err_switch_setup:
11752 i40e_reset_interrupt_capability(pf);
11753 del_timer_sync(&pf->service_timer);
11754 err_mac_addr:
11755 err_configure_lan_hmc:
11756 (void)i40e_shutdown_lan_hmc(hw);
11757 err_init_lan_hmc:
11758 kfree(pf->qp_pile);
11759 err_sw_init:
11760 err_adminq_setup:
11761 err_pf_reset:
11762 iounmap(hw->hw_addr);
11763 err_ioremap:
11764 kfree(pf);
11765 err_pf_alloc:
11766 pci_disable_pcie_error_reporting(pdev);
11767 pci_release_mem_regions(pdev);
11768 err_pci_reg:
11769 err_dma:
11770 pci_disable_device(pdev);
11771 return err;
11772 }
11773
11774 /**
11775 * i40e_remove - Device removal routine
11776 * @pdev: PCI device information struct
11777 *
11778 * i40e_remove is called by the PCI subsystem to alert the driver
11779 * that it should release a PCI device. This could be caused by a
11780 * Hot-Plug event, or because the driver is going to be removed from
11781 * memory.
11782 **/
11783 static void i40e_remove(struct pci_dev *pdev)
11784 {
11785 struct i40e_pf *pf = pci_get_drvdata(pdev);
11786 struct i40e_hw *hw = &pf->hw;
11787 i40e_status ret_code;
11788 int i;
11789
11790 i40e_dbg_pf_exit(pf);
11791
11792 i40e_ptp_stop(pf);
11793
11794 /* Disable RSS in hw */
11795 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
11796 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
11797
11798 /* no more scheduling of any task */
11799 set_bit(__I40E_SUSPENDED, pf->state);
11800 set_bit(__I40E_DOWN, pf->state);
11801 if (pf->service_timer.data)
11802 del_timer_sync(&pf->service_timer);
11803 if (pf->service_task.func)
11804 cancel_work_sync(&pf->service_task);
11805
11806 /* Client close must be called explicitly here because the timer
11807 * has been stopped.
11808 */
11809 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
11810
11811 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
11812 i40e_free_vfs(pf);
11813 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
11814 }
11815
11816 i40e_fdir_teardown(pf);
11817
11818 /* If there is a switch structure or any orphans, remove them.
11819 * This will leave only the PF's VSI.
11820 */
11821 for (i = 0; i < I40E_MAX_VEB; i++) {
11822 if (!pf->veb[i])
11823 continue;
11824
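/* a VEB uplinked to the MAC is the top of a switch branch, and an
 * uplink_seid of zero marks an orphan; release both
 */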
11825 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
11826 pf->veb[i]->uplink_seid == 0)
11827 i40e_switch_branch_release(pf->veb[i]);
11828 }
11829
11830 /* Now we can shutdown the PF's VSI, just before we kill
11831 * adminq and hmc.
11832 */
11833 if (pf->vsi[pf->lan_vsi])
11834 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
11835
11836 /* remove attached clients */
11837 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11838 ret_code = i40e_lan_del_device(pf);
11839 if (ret_code)
11840 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
11841 ret_code);
11842 }
11843
11844 /* shutdown and destroy the HMC */
11845 if (hw->hmc.hmc_obj) {
11846 ret_code = i40e_shutdown_lan_hmc(hw);
11847 if (ret_code)
11848 dev_warn(&pdev->dev,
11849 "Failed to destroy the HMC resources: %d\n",
11850 ret_code);
11851 }
11852
11853 /* shutdown the adminq */
11854 i40e_shutdown_adminq(hw);
11855
11856 /* destroy the locks only once, here */
11857 mutex_destroy(&hw->aq.arq_mutex);
11858 mutex_destroy(&hw->aq.asq_mutex);
11859
11860 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
11861 i40e_clear_interrupt_scheme(pf);
11862 for (i = 0; i < pf->num_alloc_vsi; i++) {
11863 if (pf->vsi[i]) {
11864 i40e_vsi_clear_rings(pf->vsi[i]);
11865 i40e_vsi_clear(pf->vsi[i]);
11866 pf->vsi[i] = NULL;
11867 }
11868 }
11869
11870 for (i = 0; i < I40E_MAX_VEB; i++) {
11871 kfree(pf->veb[i]);
11872 pf->veb[i] = NULL;
11873 }
11874
11875 kfree(pf->qp_pile);
11876 kfree(pf->vsi);
11877
11878 iounmap(hw->hw_addr);
11879 kfree(pf);
11880 pci_release_mem_regions(pdev);
11881
11882 pci_disable_pcie_error_reporting(pdev);
11883 pci_disable_device(pdev);
11884 }
11885
11886 /**
11887 * i40e_pci_error_detected - warning that something funky happened in PCI land
11888 * @pdev: PCI device information struct
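 * @error: the type of PCI error reported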
11889 *
11890 * Called to warn that something happened and the error handling steps
11891 * are in progress. Allows the driver to quiesce things and be ready
11892 * for remediation.
11893 **/
11894 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
11895 enum pci_channel_state error)
11896 {
11897 struct i40e_pf *pf = pci_get_drvdata(pdev);
11898
11899 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
11900
11901 if (!pf) {
11902 dev_info(&pdev->dev,
11903 "Cannot recover - error happened during device probe\n");
11904 return PCI_ERS_RESULT_DISCONNECT;
11905 }
11906
11907 /* shutdown all operations */
11908 if (!test_bit(__I40E_SUSPENDED, pf->state))
11909 i40e_prep_for_reset(pf, false);
11910
11911 /* Request a slot reset */
11912 return PCI_ERS_RESULT_NEED_RESET;
11913 }
11914
11915 /**
11916 * i40e_pci_error_slot_reset - a PCI slot reset just happened
11917 * @pdev: PCI device information struct
11918 *
11919 * Called to find if the driver can work with the device now that
11920 * the pci slot has been reset. If a basic connection seems good
11921 * (registers are readable and have sane content) then return a
11922 * happy little PCI_ERS_RESULT_xxx.
11923 **/
11924 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
11925 {
11926 struct i40e_pf *pf = pci_get_drvdata(pdev);
11927 pci_ers_result_t result;
11928 int err;
11929 u32 reg;
11930
11931 dev_dbg(&pdev->dev, "%s\n", __func__);
11932 if (pci_enable_device_mem(pdev)) {
11933 dev_info(&pdev->dev,
11934 "Cannot re-enable PCI device after reset.\n");
11935 result = PCI_ERS_RESULT_DISCONNECT;
11936 } else {
11937 pci_set_master(pdev);
11938 pci_restore_state(pdev);
11939 pci_save_state(pdev);
11940 pci_wake_from_d3(pdev, false);
11941
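/* as a sanity check, read back a reset-related register: a value of
 * zero means the registers are reachable and no reset is pending, so
 * recovery can be reported
 */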
11942 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
11943 if (reg == 0)
11944 result = PCI_ERS_RESULT_RECOVERED;
11945 else
11946 result = PCI_ERS_RESULT_DISCONNECT;
11947 }
11948
11949 err = pci_cleanup_aer_uncorrect_error_status(pdev);
11950 if (err) {
11951 dev_info(&pdev->dev,
11952 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
11953 err);
11954 /* non-fatal, continue */
11955 }
11956
11957 return result;
11958 }
11959
11960 /**
11961 * i40e_pci_error_resume - restart operations after PCI error recovery
11962 * @pdev: PCI device information struct
11963 *
11964 * Called to allow the driver to bring things back up after PCI error
11965 * and/or reset recovery has finished.
11966 **/
11967 static void i40e_pci_error_resume(struct pci_dev *pdev)
11968 {
11969 struct i40e_pf *pf = pci_get_drvdata(pdev);
11970
11971 dev_dbg(&pdev->dev, "%s\n", __func__);
11972 if (test_bit(__I40E_SUSPENDED, pf->state))
11973 return;
11974
11975 i40e_handle_reset_warning(pf, false);
11976 }
11977
11978 /**
11979 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
11980 * using the mac_address_write admin q function
11981 * @pf: pointer to i40e_pf struct
11982 **/
11983 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
11984 {
11985 struct i40e_hw *hw = &pf->hw;
11986 i40e_status ret;
11987 u8 mac_addr[ETH_ALEN];
11988 u16 flags = 0;
11989
11990 /* Get current MAC address in case it's an LAA */
11991 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
11992 ether_addr_copy(mac_addr,
11993 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
11994 } else {
11995 dev_err(&pf->pdev->dev,
11996 "Failed to retrieve MAC address; using default\n");
11997 ether_addr_copy(mac_addr, hw->mac.addr);
11998 }
11999
12000 /* The FW expects the mac address write cmd to first be called with
12001 * one of these flags before calling it again with the multicast
12002 * enable flags.
12003 */
12004 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
12005
12006 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
12007 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
12008
12009 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
12010 if (ret) {
12011 dev_err(&pf->pdev->dev,
12012 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
12013 return;
12014 }
12015
12016 flags = I40E_AQC_MC_MAG_EN
12017 | I40E_AQC_WOL_PRESERVE_ON_PFR
12018 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
12019 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
12020 if (ret)
12021 dev_err(&pf->pdev->dev,
12022 "Failed to enable Multicast Magic Packet wake up\n");
12023 }
12024
12025 /**
12026 * i40e_shutdown - PCI callback for shutting down
12027 * @pdev: PCI device information struct
12028 **/
12029 static void i40e_shutdown(struct pci_dev *pdev)
12030 {
12031 struct i40e_pf *pf = pci_get_drvdata(pdev);
12032 struct i40e_hw *hw = &pf->hw;
12033
12034 set_bit(__I40E_SUSPENDED, pf->state);
12035 set_bit(__I40E_DOWN, pf->state);
12036 rtnl_lock();
12037 i40e_prep_for_reset(pf, true);
12038 rtnl_unlock();
12039
12040 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
12041 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
12042
12043 del_timer_sync(&pf->service_timer);
12044 cancel_work_sync(&pf->service_task);
12045 i40e_fdir_teardown(pf);
12046
12047 /* Client close must be called explicitly here because the timer
12048 * has been stopped.
12049 */
12050 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
12051
12052 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
12053 i40e_enable_mc_magic_wake(pf);
12054
12055 i40e_prep_for_reset(pf, false);
12056
12057 wr32(hw, I40E_PFPM_APM,
12058 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
12059 wr32(hw, I40E_PFPM_WUFC,
12060 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
12061
12062 i40e_clear_interrupt_scheme(pf);
12063
12064 if (system_state == SYSTEM_POWER_OFF) {
12065 pci_wake_from_d3(pdev, pf->wol_en);
12066 pci_set_power_state(pdev, PCI_D3hot);
12067 }
12068 }
12069
12070 #ifdef CONFIG_PM
12071 /**
12072 * i40e_suspend - PCI callback for moving to D3
12073 * @pdev: PCI device information struct
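 * @state: the PM transition requested by the PCI core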
12074 **/
12075 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
12076 {
12077 struct i40e_pf *pf = pci_get_drvdata(pdev);
12078 struct i40e_hw *hw = &pf->hw;
12079 int retval = 0;
12080
12081 set_bit(__I40E_SUSPENDED, pf->state);
12082 set_bit(__I40E_DOWN, pf->state);
12083
12084 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
12085 i40e_enable_mc_magic_wake(pf);
12086
12087 i40e_prep_for_reset(pf, false);
12088
12089 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
12090 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
12091
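/* quiesce and free the misc/admin queue vector so it is not left
 * armed while the device is in D3
 */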
12092 i40e_stop_misc_vector(pf);
12093 if (pf->msix_entries) {
12094 synchronize_irq(pf->msix_entries[0].vector);
12095 free_irq(pf->msix_entries[0].vector, pf);
12096 }
12097 retval = pci_save_state(pdev);
12098 if (retval)
12099 return retval;
12100
12101 pci_wake_from_d3(pdev, pf->wol_en);
12102 pci_set_power_state(pdev, PCI_D3hot);
12103
12104 return retval;
12105 }
12106
12107 /**
12108 * i40e_resume - PCI callback for waking up from D3
12109 * @pdev: PCI device information struct
12110 **/
12111 static int i40e_resume(struct pci_dev *pdev)
12112 {
12113 struct i40e_pf *pf = pci_get_drvdata(pdev);
12114 int err;
12115
12116 pci_set_power_state(pdev, PCI_D0);
12117 pci_restore_state(pdev);
12118 /* pci_restore_state() clears dev->state_saved, so
12119 * call pci_save_state() again to restore it.
12120 */
12121 pci_save_state(pdev);
12122
12123 err = pci_enable_device_mem(pdev);
12124 if (err) {
12125 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
12126 return err;
12127 }
12128 pci_set_master(pdev);
12129
12130 /* no wakeup events while running */
12131 pci_wake_from_d3(pdev, false);
12132
12133 /* handling the reset will rebuild the device state */
12134 if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
12135 clear_bit(__I40E_DOWN, pf->state);
12136 if (pf->msix_entries) {
12137 err = request_irq(pf->msix_entries[0].vector,
12138 i40e_intr, 0, pf->int_name, pf);
12139 if (err) {
12140 dev_err(&pf->pdev->dev,
12141 "request_irq for %s failed: %d\n",
12142 pf->int_name, err);
12143 }
12144 }
12145 i40e_reset_and_rebuild(pf, false, false);
12146 }
12147
12148 return 0;
12149 }
12150
12151 #endif
12152 static const struct pci_error_handlers i40e_err_handler = {
12153 .error_detected = i40e_pci_error_detected,
12154 .slot_reset = i40e_pci_error_slot_reset,
12155 .resume = i40e_pci_error_resume,
12156 };
12157
12158 static struct pci_driver i40e_driver = {
12159 .name = i40e_driver_name,
12160 .id_table = i40e_pci_tbl,
12161 .probe = i40e_probe,
12162 .remove = i40e_remove,
12163 #ifdef CONFIG_PM
12164 .suspend = i40e_suspend,
12165 .resume = i40e_resume,
12166 #endif
12167 .shutdown = i40e_shutdown,
12168 .err_handler = &i40e_err_handler,
12169 .sriov_configure = i40e_pci_sriov_configure,
12170 };
12171
12172 /**
12173 * i40e_init_module - Driver registration routine
12174 *
12175 * i40e_init_module is the first routine called when the driver is
12176 * loaded. All it does is register with the PCI subsystem.
12177 **/
12178 static int __init i40e_init_module(void)
12179 {
12180 pr_info("%s: %s - version %s\n", i40e_driver_name,
12181 i40e_driver_string, i40e_driver_version_str);
12182 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
12183
12184 /* There is no need to throttle the number of active tasks because
12185 * each device limits its own task using a state bit for scheduling
12186 * the service task, and the device tasks do not interfere with each
12187 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
12188 * since we need to be able to guarantee forward progress even under
12189 * memory pressure.
12190 */
12191 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
12192 if (!i40e_wq) {
12193 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
12194 return -ENOMEM;
12195 }
12196
12197 i40e_dbg_init();
12198 return pci_register_driver(&i40e_driver);
12199 }
12200 module_init(i40e_init_module);
12201
12202 /**
12203 * i40e_exit_module - Driver exit cleanup routine
12204 *
12205 * i40e_exit_module is called just before the driver is removed
12206 * from memory.
12207 **/
12208 static void __exit i40e_exit_module(void)
12209 {
12210 pci_unregister_driver(&i40e_driver);
12211 destroy_workqueue(i40e_wq);
12212 i40e_dbg_exit();
12213 }
12214 module_exit(i40e_exit_module);