/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>

#include "bna.h"
#include "bnad.h"
#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
#define BNAD_NUM_RXQ_COUNTERS 6
#define BNAD_NUM_TXQ_COUNTERS 5
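
/*
 * Number of "fixed" ethtool stats: the rtnl_link_stats64 counters, the
 * driver-private counters in bnad_drv_stats, and the MAC/enet hardware
 * counters that precede the per-function rxf stats in bfi_enet_stats.
 */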
#define BNAD_ETHTOOL_STATS_NUM						\
	(sizeof(struct rtnl_link_stats64) / sizeof(u64) +		\
	 sizeof(struct bnad_drv_stats) / sizeof(u64) +			\
	 offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
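
/*
 * Names for the fixed stats, in the exact order their values are
 * written into the ethtool buffer by bnad_get_ethtool_stats().
 */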
static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
63 "tx_heartbeat_errors",
71 "netif_queue_stopped",
81 "tx_skb_mss_too_long",
82 "tx_skb_tso_too_short",
84 "tx_skb_non_tso_too_long",
88 "tx_skb_headlen_too_long",
89 "tx_skb_headlen_zero",
91 "tx_skb_len_mismatch",
98 "rxp_info_alloc_failed",
101 "tx_unmap_q_alloc_failed",
102 "rx_unmap_q_alloc_failed",
103 "rxbuf_alloc_failed",
109 "mac_frame_512_1023",
110 "mac_frame_1024_1518",
111 "mac_frame_1518_1522",
117 "mac_rx_control_frames",
119 "mac_rx_unknown_opcode",
120 "mac_rx_alignment_error",
121 "mac_rx_frame_length_error",
123 "mac_rx_carrier_sense_error",
136 "mac_tx_excessive_deferral",
137 "mac_tx_single_collision",
138 "mac_tx_muliple_collision",
139 "mac_tx_late_collision",
140 "mac_tx_excessive_collision",
141 "mac_tx_total_collision",
142 "mac_tx_pause_honored",
146 "mac_tx_control_frame",
159 "bpc_tx_zero_pause_0",
160 "bpc_tx_zero_pause_1",
161 "bpc_tx_zero_pause_2",
162 "bpc_tx_zero_pause_3",
163 "bpc_tx_zero_pause_4",
164 "bpc_tx_zero_pause_5",
165 "bpc_tx_zero_pause_6",
166 "bpc_tx_zero_pause_7",
167 "bpc_tx_first_pause_0",
168 "bpc_tx_first_pause_1",
169 "bpc_tx_first_pause_2",
170 "bpc_tx_first_pause_3",
171 "bpc_tx_first_pause_4",
172 "bpc_tx_first_pause_5",
173 "bpc_tx_first_pause_6",
174 "bpc_tx_first_pause_7",
184 "bpc_rx_zero_pause_0",
185 "bpc_rx_zero_pause_1",
186 "bpc_rx_zero_pause_2",
187 "bpc_rx_zero_pause_3",
188 "bpc_rx_zero_pause_4",
189 "bpc_rx_zero_pause_5",
190 "bpc_rx_zero_pause_6",
191 "bpc_rx_zero_pause_7",
192 "bpc_rx_first_pause_0",
193 "bpc_rx_first_pause_1",
194 "bpc_rx_first_pause_2",
195 "bpc_rx_first_pause_3",
196 "bpc_rx_first_pause_4",
197 "bpc_rx_first_pause_5",
198 "bpc_rx_first_pause_6",
199 "bpc_rx_first_pause_7",
203 "rad_rx_vlan_frames",
205 "rad_rx_ucast_octets",
208 "rad_rx_mcast_octets",
211 "rad_rx_bcast_octets",
217 "rlb_rad_rx_vlan_frames",
219 "rlb_rad_rx_ucast_octets",
220 "rlb_rad_rx_ucast_vlan",
222 "rlb_rad_rx_mcast_octets",
223 "rlb_rad_rx_mcast_vlan",
225 "rlb_rad_rx_bcast_octets",
226 "rlb_rad_rx_bcast_vlan",
229 "fc_rx_ucast_octets",
232 "fc_rx_mcast_octets",
235 "fc_rx_bcast_octets",
239 "fc_tx_ucast_octets",
242 "fc_tx_mcast_octets",
245 "fc_tx_bcast_octets",
248 "fc_tx_parity_errors",
250 "fc_tx_fid_parity_errors",
static int
bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	cmd->supported = SUPPORTED_10000baseT_Full;
	cmd->advertising = ADVERTISED_10000baseT_Full;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->supported |= SUPPORTED_FIBRE;
	cmd->advertising |= ADVERTISED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(cmd, SPEED_10000);
		cmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}
	cmd->transceiver = XCVR_EXTERNAL;

	return 0;
}
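
/* Only the fixed 10G full-duplex setting is accepted; anything else fails */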
static int
bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	/* 10G full duplex setting supported only */
	if (cmd->autoneg == AUTONEG_ENABLE)
		return -EOPNOTSUPP;

	if (ethtool_cmd_speed(cmd) == SPEED_10000 &&
	    cmd->duplex == DUPLEX_FULL)
		return 0;

	return -EOPNOTSUPP;
}
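
/* Fill driver, firmware and PCI bus identification strings */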
static void
bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bfa_ioc_attr *ioc_attr;
	unsigned long flags;

	strcpy(drvinfo->driver, BNAD_NAME);
	strcpy(drvinfo->version, BNAD_VERSION);

	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
	if (ioc_attr) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
			sizeof(drvinfo->fw_version) - 1);
		kfree(ioc_attr);
	}

	strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
}
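
/* Wake-on-LAN is not supported by this adapter */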
static void
bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
{
	wolinfo->supported = 0;
	wolinfo->wolopts = 0;
}
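
/* Report interrupt moderation in usecs; hardware works in timer units */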
static int
bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	/* Lock required to access bnad->cfg_flags */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	coalesce->use_adaptive_rx_coalesce =
		(bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
					BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
					BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;

	return 0;
}
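
/*
 * Validate and apply coalescing settings. use_adaptive_rx_coalesce maps to
 * the driver's dynamic interrupt moderation (DIM) timer; disabling it stops
 * the timer and re-applies the static rx coalescing timeout.
 */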
static int
bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;
	int to_del = 0;

	if (coalesce->rx_coalesce_usecs == 0 ||
	    coalesce->rx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	if (coalesce->tx_coalesce_usecs == 0 ||
	    coalesce->tx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);
	/*
	 * Do not need to store rx_coalesce_usecs here.
	 * Every time DIM is disabled, we can get it from the stack.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (coalesce->use_adaptive_rx_coalesce) {
		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
			bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
			bnad_dim_timer_start(bnad);
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
			bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
			/* Stop the DIM timer if it is running */
			if (test_bit(BNAD_RF_DIM_TIMER_RUNNING,
				     &bnad->run_flags)) {
				clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
					  &bnad->run_flags);
				to_del = 1;
			}
			spin_unlock_irqrestore(&bnad->bna_lock, flags);
			if (to_del)
				del_timer_sync(&bnad->dim_timer);
			spin_lock_irqsave(&bnad->bna_lock, flags);
			bnad_rx_coalescing_timeo_set(bnad);
		}
	}
	if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;
		bnad_tx_coalescing_timeo_set(bnad);
	}

	if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;
		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
			bnad_rx_coalescing_timeo_set(bnad);
	}

	/* Add Tx Inter-pkt DMA count? */

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	return 0;
}
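
/* Report max and current ring depths; mini/jumbo rings are not used */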
static void
bnad_get_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;
	ringparam->rx_mini_max_pending = 0;
	ringparam->rx_jumbo_max_pending = 0;
	ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;

	ringparam->rx_pending = bnad->rxq_depth;
	ringparam->rx_mini_pending = 0;
	ringparam->rx_jumbo_pending = 0;
	ringparam->tx_pending = bnad->txq_depth;
}
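
/*
 * Change ring depths. Depths must be powers of 2 within the min/max bounds;
 * on a running interface each Rx/Tx path is torn down and re-created, then
 * the Rx configuration (VLANs, bcast, MAC address, rx mode) is restored.
 */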
static int
bnad_set_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	int i, current_err, err = 0;
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);
	if (ringparam->rx_pending == bnad->rxq_depth &&
	    ringparam->tx_pending == bnad->txq_depth) {
		mutex_unlock(&bnad->conf_mutex);
		return 0;
	}

	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
	    !BNA_POWER_OF_2(ringparam->rx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}
	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
	    !BNA_POWER_OF_2(ringparam->tx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}

	if (ringparam->rx_pending != bnad->rxq_depth) {
		bnad->rxq_depth = ringparam->rx_pending;
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			bnad_cleanup_rx(bnad, i);
			current_err = bnad_setup_rx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}

		if (!err && bnad->rx_info[0].rx) {
			/* restore rx configuration */
			bnad_restore_vlans(bnad, 0);
			bnad_enable_default_bcast(bnad);
			spin_lock_irqsave(&bnad->bna_lock, flags);
			bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
			spin_unlock_irqrestore(&bnad->bna_lock, flags);
			bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI |
					     BNAD_CF_PROMISC);
			bnad_set_rx_mode(netdev);
		}
	}
	if (ringparam->tx_pending != bnad->txq_depth) {
		bnad->txq_depth = ringparam->tx_pending;
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			bnad_cleanup_tx(bnad, i);
			current_err = bnad_setup_tx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}
	}

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
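
/* Report the current pause frame configuration; pause autoneg is unsupported */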
static void
bnad_get_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	pauseparam->autoneg = 0;
	pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
	pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
}
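
/* Apply the pause configuration to the hardware only if it actually changed */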
static int
bnad_set_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	unsigned long flags;

	if (pauseparam->autoneg == AUTONEG_ENABLE)
		return -EOPNOTSUPP;

	mutex_lock(&bnad->conf_mutex);
	if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
	    pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
		pause_config.rx_pause = pauseparam->rx_pause;
		pause_config.tx_pause = pauseparam->tx_pause;
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
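
/*
 * Emit stat names: the fixed strings first, then per-txf/rxf, per-CQ,
 * per-RxQ and per-TxQ names. Names must be generated in exactly the order
 * and count that bnad_get_stats_count_locked()/bnad_get_ethtool_stats() use.
 */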
static void
bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, q_num;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
			BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
				 ETH_GSTRING_LEN));
			memcpy(string, bnad_net_stats_strings[i],
			       ETH_GSTRING_LEN);
			string += ETH_GSTRING_LEN;
		}
		bmap = bna_tx_rid_mask(&bnad->bna);
		for (i = 0; bmap; i++) {
			if (bmap & 1) {
				sprintf(string, "txf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_errors", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_mac_sa", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}

		bmap = bna_rx_rid_mask(&bnad->bna);
		for (i = 0; bmap; i++) {
			if (bmap & 1) {
				sprintf(string, "rxf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_frame_drops", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}

		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "cq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_hw_producer_index",
					q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_intr", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_poll", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_schedule", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_keep_poll", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_complete", q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}

		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "rxq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_packets_with_error",
					q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_allocbuf_failed", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
				if (bnad->rx_info[i].rx_ctrl[j].ccb &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[1] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[1]->rxq) {
					sprintf(string, "rxq%d_packets", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_bytes", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string,
						"rxq%d_packets_with_error", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_allocbuf_failed",
						q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_producer_index",
						q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_consumer_index",
						q_num);
					string += ETH_GSTRING_LEN;
					q_num++;
				}
			}
		}

		q_num = 0;
		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			for (j = 0; j < bnad->num_txq_per_tx; j++) {
				sprintf(string, "txq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_hw_consumer_index",
					q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}
		break;

	default:
		break;
	}

	mutex_unlock(&bnad->conf_mutex);
}
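
/*
 * Count the total number of stats entries for the current queue
 * configuration; normally called with conf_mutex held (hence "_locked").
 */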
static int
bnad_get_stats_count_locked(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0;
	u32 bmap;

	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1)
			txf_active_num++;
		bmap >>= 1;
	}
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1)
			rxf_active_num++;
		bmap >>= 1;
	}
	count = BNAD_ETHTOOL_STATS_NUM +
		txf_active_num * BNAD_NUM_TXF_COUNTERS +
		rxf_active_num * BNAD_NUM_RXF_COUNTERS;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
		count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
				count += BNAD_NUM_RXQ_COUNTERS;
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
	}
	return count;
}
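
/*
 * Append per-CQ, per-RxQ and per-TxQ counters to the ethtool buffer
 * starting at index bi; returns the next free buffer index.
 */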
static int
bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
{
	int i, j;
	struct bna_rcb *rcb = NULL;
	struct bna_tcb *tcb = NULL;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
				buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
						ccb->producer_index;
				buf[bi++] = 0; /* ccb->consumer_index */
				buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
						ccb->hw_producer_index);

				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_intr_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_poll_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_schedule;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_keep_poll;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_complete;
			}
	}
	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[0]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
								ccb->rcb[0];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[1]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
								ccb->rcb[1];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
			}
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			if (bnad->tx_info[i].tcb[j] &&
			    bnad->tx_info[i].tcb[j]->txq) {
				tcb = bnad->tx_info[i].tcb[j];
				buf[bi++] = tcb->txq->tx_packets;
				buf[bi++] = tcb->txq->tx_bytes;
				buf[bi++] = tcb->producer_index;
				buf[bi++] = tcb->consumer_index;
				buf[bi++] = *(tcb->hw_consumer_index);
			}
	}

	return bi;
}
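
/*
 * Fill the ethtool stats buffer: netdev stats, driver stats, hardware
 * stats (minus rxf/txf), active txf/rxf stats, then per-queue stats.
 * Bails out if the expected count no longer matches stats->n_stats.
 */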
static void
bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
		       u64 *buf)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, bi;
	unsigned long flags;
	struct rtnl_link_stats64 *net_stats64;
	u64 *stats64;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);
	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
		mutex_unlock(&bnad->conf_mutex);
		return;
	}

	/*
	 * Use bna_lock to sync reads from bna_stats, which is written
	 * under the same lock
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);

	memset(buf, 0, stats->n_stats * sizeof(u64));

	net_stats64 = (struct rtnl_link_stats64 *)buf;
	bnad_netdev_qstats_fill(bnad, net_stats64);
	bnad_netdev_hwstats_fill(bnad, net_stats64);

	bi = sizeof(*net_stats64) / sizeof(u64);

	/* Get netif_queue_stopped from stack */
	bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);

	/* Fill driver stats into ethtool buffers */
	stats64 = (u64 *)&bnad->stats.drv_stats;
	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
		buf[bi++] = stats64[i];

	/* Fill hardware stats excluding the rxf/txf into ethtool bufs */
	stats64 = (u64 *)&bnad->stats.bna_stats->hw_stats;
	for (i = 0;
	     i < offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64);
	     i++)
		buf[bi++] = stats64[i];

	/* Fill txf stats into ethtool buffers */
	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.txf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill rxf stats into ethtool buffers */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.rxf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill per Q stats into ethtool buffers */
	bi = bnad_per_q_stats_fill(bnad, buf, bi);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
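
/* Only the ETH_SS_STATS string set is supported */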
static int
bnad_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return bnad_get_stats_count_locked(netdev);
	default:
		return -EOPNOTSUPP;
	}
}
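
/* ethtool entry points for this driver */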
static struct ethtool_ops bnad_ethtool_ops = {
	.get_settings = bnad_get_settings,
	.set_settings = bnad_set_settings,
	.get_drvinfo = bnad_get_drvinfo,
	.get_wol = bnad_get_wol,
	.get_link = ethtool_op_get_link,
	.get_coalesce = bnad_get_coalesce,
	.set_coalesce = bnad_set_coalesce,
	.get_ringparam = bnad_get_ringparam,
	.set_ringparam = bnad_set_ringparam,
	.get_pauseparam = bnad_get_pauseparam,
	.set_pauseparam = bnad_set_pauseparam,
	.get_strings = bnad_get_strings,
	.get_ethtool_stats = bnad_get_ethtool_stats,
	.get_sset_count = bnad_get_sset_count,
};
void
bnad_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
}