/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        en_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
        priv->vlgrp = grp;

        mutex_lock(&mdev->state_lock);
        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        if (!priv->vlgrp)
                return;

        en_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
               vid, vlan_group_get_device(priv->vlgrp, vid));

        /* Add VID to port VLAN filter */
        mutex_lock(&mdev->state_lock);
        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        if (!priv->vlgrp)
                return;

        en_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp entry:%p)\n",
               vid, priv->vlgrp, vlan_group_get_device(priv->vlgrp, vid));
        vlan_group_set_device(priv->vlgrp, vid, NULL);

        /* Remove VID from port VLAN filter */
        mutex_lock(&mdev->state_lock);
        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        mutex_unlock(&mdev->state_lock);
}

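/*
 * Note: the helper below packs a 6-byte Ethernet address into the low 48
 * bits of a u64, most significant byte first (e.g. 00:11:22:33:44:55
 * becomes 0x001122334455). This is the format the driver hands to
 * mlx4_register_mac() and mlx4_SET_MCAST_FLTR().
 */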
static u64 mlx4_en_mac_to_u64(u8 *addr)
{
        u64 mac = 0;
        int i;

        for (i = 0; i < ETH_ALEN; i++) {
                mac <<= 8;
                mac |= addr[i];
        }
        return mac;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct sockaddr *saddr = addr;

        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
        priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
        queue_work(mdev->workqueue, &priv->mac_task);
        return 0;
}

static void mlx4_en_do_set_mac(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 mac_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err = 0;

        mutex_lock(&mdev->state_lock);
        if (priv->port_up) {
                /* Remove old MAC and insert the new one */
                mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
                err = mlx4_register_mac(mdev->dev, priv->port,
                                        priv->mac, &priv->mac_index);
                if (err)
                        en_err(priv, "Failed changing HW MAC address\n");
        } else
                en_dbg(HW, priv, "Port is down while "
                                 "registering mac, exiting...\n");

        mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_list(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct dev_mc_list *plist = priv->mc_list;
        struct dev_mc_list *next;

        while (plist) {
                next = plist->next;
                kfree(plist);
                plist = next;
        }
        priv->mc_list = NULL;
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct dev_mc_list *mclist;
        struct dev_mc_list *tmp;
        struct dev_mc_list *plist = NULL;

        for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
                tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
                if (!tmp) {
                        en_err(priv, "failed to allocate multicast list\n");
                        mlx4_en_clear_list(dev);
                        return;
                }
                memcpy(tmp, mclist, sizeof(struct dev_mc_list));
                tmp->next = NULL;
                if (plist)
                        plist->next = tmp;
                else
                        priv->mc_list = tmp;
                plist = tmp;
        }
}

static void mlx4_en_set_multicast(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        if (!priv->port_up)
                return;

        queue_work(priv->mdev->workqueue, &priv->mcast_task);
}

static void mlx4_en_do_set_multicast(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 mcast_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct net_device *dev = priv->dev;
        struct dev_mc_list *mclist;
        u64 mcast_addr = 0;
        int err;

        mutex_lock(&mdev->state_lock);
        if (!mdev->device_up) {
                en_dbg(HW, priv, "Card is not up, "
                                 "ignoring multicast change.\n");
                goto out;
        }
        if (!priv->port_up) {
                en_dbg(HW, priv, "Port is down, "
                                 "ignoring multicast change.\n");
                goto out;
        }

        /*
         * Promiscuous mode: disable all filters
         */

        if (dev->flags & IFF_PROMISC) {
                if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
                        if (netif_msg_rx_status(priv))
                                en_warn(priv, "Entering promiscuous mode\n");
                        priv->flags |= MLX4_EN_FLAG_PROMISC;

                        /* Enable promiscuous mode */
                        err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
                                                     priv->base_qpn, 1);
                        if (err)
                                en_err(priv, "Failed enabling "
                                             "promiscuous mode\n");

                        /* Disable port multicast filter (unconditionally) */
                        err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                                  0, MLX4_MCAST_DISABLE);
                        if (err)
                                en_err(priv, "Failed disabling "
                                             "multicast filter\n");

                        /* Disable port VLAN filter */
                        err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
                        if (err)
                                en_err(priv, "Failed disabling VLAN filter\n");
                }
                goto out;
        }

        /*
         * Not in promiscuous mode
         */

        if (priv->flags & MLX4_EN_FLAG_PROMISC) {
                if (netif_msg_rx_status(priv))
                        en_warn(priv, "Leaving promiscuous mode\n");
                priv->flags &= ~MLX4_EN_FLAG_PROMISC;

                /* Disable promiscuous mode */
                err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
                                             priv->base_qpn, 0);
                if (err)
                        en_err(priv, "Failed disabling promiscuous mode\n");

                /* Enable port VLAN filter */
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
                if (err)
                        en_err(priv, "Failed enabling VLAN filter\n");
        }

        /* Enable/disable the multicast filter according to IFF_ALLMULTI */
        if (dev->flags & IFF_ALLMULTI) {
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");
        } else {
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");

                /* Flush mcast filter and init it with broadcast address */
                mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
                                    1, MLX4_MCAST_CONFIG);

                /* Update multicast list - we cache all addresses so they won't
                 * change while HW is updated holding the command semaphore */
                netif_tx_lock_bh(dev);
                mlx4_en_cache_mclist(dev);
                netif_tx_unlock_bh(dev);
                for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
                        mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
                        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
                                            mcast_addr, 0, MLX4_MCAST_CONFIG);
                }
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_ENABLE);
                if (err)
                        en_err(priv, "Failed enabling multicast filter\n");

                mlx4_en_clear_list(dev);
        }
out:
        mutex_unlock(&mdev->state_lock);
}

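/*
 * Netpoll entry point: when called via netpoll (e.g. netconsole), the
 * normal NAPI path cannot be relied on, so each RX CQ is drained directly
 * under its lock after synchronizing with any in-flight NAPI poll.
 */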
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_cq *cq;
        unsigned long flags;
        int i;

        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = &priv->rx_cq[i];
                spin_lock_irqsave(&cq->lock, flags);
                napi_synchronize(&cq->napi);
                mlx4_en_process_rx_cq(dev, cq, 0);
                spin_unlock_irqrestore(&cq->lock, flags);
        }
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;

        if (netif_msg_timer(priv))
                en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

        priv->port_stats.tx_timeout++;
        en_dbg(DRV, priv, "Scheduling watchdog\n");
        queue_work(mdev->workqueue, &priv->watchdog_task);
}

static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        spin_lock_bh(&priv->stats_lock);
        memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
        spin_unlock_bh(&priv->stats_lock);

        return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
        struct mlx4_en_cq *cq;
        int i;

        /* If we haven't received a specific coalescing setting
         * (module param), we set the moderation parameters as follows:
         * - moder_cnt is set to the number of mtu sized packets to
         *   satisfy our coalescing target.
         * - moder_time is set to a fixed value.
         */
        priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
        priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
        en_dbg(INTR, priv, "Default coalescing params for mtu:%d - "
                           "rx_frames:%d rx_usecs:%d\n",
               priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

        /* Setup cq moderation params */
        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = &priv->rx_cq[i];
                cq->moder_cnt = priv->rx_frames;
                cq->moder_time = priv->rx_usecs;
        }

        for (i = 0; i < priv->tx_ring_num; i++) {
                cq = &priv->tx_cq[i];
                cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
                cq->moder_time = MLX4_EN_TX_COAL_TIME;
        }

        /* Reset auto-moderation params */
        priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
        priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
        priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
        priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
        priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
        priv->adaptive_rx_coal = 1;
        priv->last_moder_time = MLX4_EN_AUTO_CONF;
        priv->last_moder_jiffies = 0;
        priv->last_moder_packets = 0;
        priv->last_moder_tx_packets = 0;
        priv->last_moder_bytes = 0;
}

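/*
 * Adaptive moderation: between pkt_rate_low and pkt_rate_high the code
 * below interpolates linearly, i.e.
 *
 *   moder_time = rx_usecs_low + (rate - pkt_rate_low) *
 *                (rx_usecs_high - rx_usecs_low) /
 *                (pkt_rate_high - pkt_rate_low)
 *
 * so a packet rate halfway between the two thresholds yields a coalescing
 * time halfway between rx_usecs_low and rx_usecs_high.
 */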
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
        unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
        struct mlx4_en_cq *cq;
        unsigned long packets;
        unsigned long rate;
        unsigned long avg_pkt_size;
        unsigned long rx_packets;
        unsigned long rx_bytes;
        unsigned long rx_byte_diff;
        unsigned long tx_packets;
        unsigned long tx_pkt_diff;
        unsigned long rx_pkt_diff;
        int moder_time;
        int i, err;

        if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
                return;

        spin_lock_bh(&priv->stats_lock);
        rx_packets = priv->stats.rx_packets;
        rx_bytes = priv->stats.rx_bytes;
        tx_packets = priv->stats.tx_packets;
        spin_unlock_bh(&priv->stats_lock);

        if (!priv->last_moder_jiffies || !period)
                goto out;

        tx_pkt_diff = ((unsigned long) (tx_packets -
                                        priv->last_moder_tx_packets));
        rx_pkt_diff = ((unsigned long) (rx_packets -
                                        priv->last_moder_packets));
        packets = max(tx_pkt_diff, rx_pkt_diff);
        rx_byte_diff = rx_bytes - priv->last_moder_bytes;
        rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1;
        rate = packets * HZ / period;
        avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
                                 priv->last_moder_bytes)) / packets : 0;

        /* Apply auto-moderation only when packet rate exceeds a rate that
         * it matters */
        if (rate > MLX4_EN_RX_RATE_THRESH) {
                /* If tx and rx packet rates are not balanced, assume that
                 * traffic is mainly BW bound and apply maximum moderation.
                 * Otherwise, moderate according to packet rate */
                if (2 * tx_pkt_diff > 3 * rx_pkt_diff &&
                    rx_pkt_diff / rx_byte_diff <
                    MLX4_EN_SMALL_PKT_SIZE)
                        moder_time = priv->rx_usecs_low;
                else if (2 * rx_pkt_diff > 3 * tx_pkt_diff)
                        moder_time = priv->rx_usecs_high;
                else {
                        if (rate < priv->pkt_rate_low)
                                moder_time = priv->rx_usecs_low;
                        else if (rate > priv->pkt_rate_high)
                                moder_time = priv->rx_usecs_high;
                        else
                                moder_time = (rate - priv->pkt_rate_low) *
                                        (priv->rx_usecs_high - priv->rx_usecs_low) /
                                        (priv->pkt_rate_high - priv->pkt_rate_low) +
                                        priv->rx_usecs_low;
                }
        } else {
                /* When packet rate is low, use default moderation rather than
                 * 0 to prevent interrupt storms if traffic suddenly increases */
                moder_time = priv->rx_usecs;
        }

        en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
               tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);

        en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
               "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
               priv->last_moder_time, moder_time, period, packets,
               avg_pkt_size, rate);

        if (moder_time != priv->last_moder_time) {
                priv->last_moder_time = moder_time;
                for (i = 0; i < priv->rx_ring_num; i++) {
                        cq = &priv->rx_cq[i];
                        cq->moder_time = moder_time;
                        err = mlx4_en_set_cq_moder(priv, cq);
                        if (err)
                                en_err(priv, "Failed modifying moderation for cq:%d\n", i);
                }
        }

out:
        priv->last_moder_packets = rx_packets;
        priv->last_moder_tx_packets = tx_packets;
        priv->last_moder_bytes = rx_bytes;
        priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
        struct delayed_work *delay = to_delayed_work(work);
        struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
                                                 stats_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
        if (err)
                en_dbg(HW, priv, "Could not update stats\n");

        mutex_lock(&mdev->state_lock);
        if (mdev->device_up) {
                if (priv->port_up)
                        mlx4_en_auto_moderation(priv);

                queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
        }
        mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 linkstate_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        int linkstate = priv->link_state;

        mutex_lock(&mdev->state_lock);
        /* If observable port state changed set carrier state and
         * report to system log */
        if (priv->last_link_state != linkstate) {
                if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
                        en_dbg(LINK, priv, "Link Down\n");
                        netif_carrier_off(priv->dev);
                } else {
                        en_dbg(LINK, priv, "Link Up\n");
                        netif_carrier_on(priv->dev);
                }
        }
        priv->last_link_state = linkstate;
        mutex_unlock(&mdev->state_lock);
}

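/*
 * Port bring-up below proceeds in a fixed order: RX rings and CQs first,
 * then RSS steering, then TX CQs and rings, then port configuration
 * (general params, default qpn, MAC) and finally INIT_PORT. The error
 * labels unwind in exactly the reverse order.
 */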
int mlx4_en_start_port(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_cq *cq;
        struct mlx4_en_tx_ring *tx_ring;
        int rx_index = 0;
        int tx_index = 0;
        int err = 0;
        int i;
        int j;

        if (priv->port_up) {
                en_dbg(DRV, priv, "start port called while port already up\n");
                return 0;
        }

        /* Calculate Rx buf size */
        dev->mtu = min(dev->mtu, priv->max_mtu);
        mlx4_en_calc_rx_buf(dev);
        en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

        /* Configure rx cq's and rings */
        err = mlx4_en_activate_rx_rings(priv);
        if (err) {
                en_err(priv, "Failed to activate RX rings\n");
                return err;
        }
        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = &priv->rx_cq[i];

                err = mlx4_en_activate_cq(priv, cq);
                if (err) {
                        en_err(priv, "Failed activating Rx CQ\n");
                        goto cq_err;
                }
                for (j = 0; j < cq->size; j++)
                        cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
                err = mlx4_en_set_cq_moder(priv, cq);
                if (err) {
                        en_err(priv, "Failed setting cq moderation parameters");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto cq_err;
                }
                mlx4_en_arm_cq(priv, cq);
                priv->rx_ring[i].cqn = cq->mcq.cqn;
                ++rx_index;
        }

        err = mlx4_en_config_rss_steer(priv);
        if (err) {
                en_err(priv, "Failed configuring rss steering\n");
                goto cq_err;
        }

        /* Configure tx cq's and rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                /* Configure cq */
                cq = &priv->tx_cq[i];
                err = mlx4_en_activate_cq(priv, cq);
                if (err) {
                        en_err(priv, "Failed allocating Tx CQ\n");
                        goto tx_err;
                }
                err = mlx4_en_set_cq_moder(priv, cq);
                if (err) {
                        en_err(priv, "Failed setting cq moderation parameters");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto tx_err;
                }
                en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
                cq->buf->wqe_index = cpu_to_be16(0xffff);

                /* Configure ring */
                tx_ring = &priv->tx_ring[i];
                err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
                if (err) {
                        en_err(priv, "Failed allocating Tx ring\n");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto tx_err;
                }
                /* Set initial ownership of all Tx TXBBs to SW (1) */
                for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
                        *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
                ++tx_index;
        }

        /* Configure port */
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
                                    priv->rx_skb_size + ETH_FCS_LEN,
                                    priv->prof->tx_pause,
                                    priv->prof->tx_ppp,
                                    priv->prof->rx_pause,
                                    priv->prof->rx_ppp);
        if (err) {
                en_err(priv, "Failed setting port general configurations "
                             "for port %d, with error %d\n", priv->port, err);
                goto tx_err;
        }
        /* Set default qp number */
        err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
        if (err) {
                en_err(priv, "Failed setting default qp numbers\n");
                goto tx_err;
        }
        /* Set port mac number */
        en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
        err = mlx4_register_mac(mdev->dev, priv->port,
                                priv->mac, &priv->mac_index);
        if (err) {
                en_err(priv, "Failed setting port mac\n");
                goto tx_err;
        }

        /* Init port */
        en_dbg(HW, priv, "Initializing port\n");
        err = mlx4_INIT_PORT(mdev->dev, priv->port);
        if (err) {
                en_err(priv, "Failed Initializing port\n");
                goto mac_err;
        }

        /* Schedule multicast task to populate multicast list */
        queue_work(mdev->workqueue, &priv->mcast_task);

        priv->port_up = true;
        netif_tx_start_all_queues(dev);
        return 0;

mac_err:
        mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
tx_err:
        while (tx_index--) {
                mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
                mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
        }

        mlx4_en_release_rss_steer(priv);
cq_err:
        while (rx_index--)
                mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
        for (i = 0; i < priv->rx_ring_num; i++)
                mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

        return err; /* need to close devices */
}

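/*
 * Teardown mirrors mlx4_en_start_port() in reverse: the TX path is
 * quiesced first under netif_tx_lock, the port is closed in HW, and only
 * then are rings and CQs deactivated, waiting for any scheduled NAPI poll
 * to finish before killing each RX CQ.
 */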
void mlx4_en_stop_port(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int i;

        if (!priv->port_up) {
                en_dbg(DRV, priv, "stop port called while port already down\n");
                return;
        }

        /* Synchronize with tx routine */
        netif_tx_lock_bh(dev);
        netif_tx_stop_all_queues(dev);
        netif_tx_unlock_bh(dev);

        /* Close port */
        priv->port_up = false;
        mlx4_CLOSE_PORT(mdev->dev, priv->port);

        /* Unregister Mac address for the port */
        mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);

        /* Free TX Rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
                mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
        }
        msleep(10);

        for (i = 0; i < priv->tx_ring_num; i++)
                mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

        /* Free RSS qps */
        mlx4_en_release_rss_steer(priv);

        /* Free RX Rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
                mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
                while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
                        msleep(1);
                mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
        }
}

static void mlx4_en_restart(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 watchdog_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct net_device *dev = priv->dev;

        en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

        mutex_lock(&mdev->state_lock);
        if (priv->port_up) {
                mlx4_en_stop_port(dev);
                if (mlx4_en_start_port(dev))
                        en_err(priv, "Failed restarting port %d\n", priv->port);
        }
        mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_open(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int i;
        int err = 0;

        mutex_lock(&mdev->state_lock);

        if (!mdev->device_up) {
                en_err(priv, "Cannot open - device down/disabled\n");
                err = -EBUSY;
                goto out;
        }

        /* Reset HW statistics and performance counters */
        if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
                en_dbg(HW, priv, "Failed dumping statistics\n");

        memset(&priv->stats, 0, sizeof(priv->stats));
        memset(&priv->pstats, 0, sizeof(priv->pstats));

        for (i = 0; i < priv->tx_ring_num; i++) {
                priv->tx_ring[i].bytes = 0;
                priv->tx_ring[i].packets = 0;
        }
        for (i = 0; i < priv->rx_ring_num; i++) {
                priv->rx_ring[i].bytes = 0;
                priv->rx_ring[i].packets = 0;
        }

        mlx4_en_set_default_moderation(priv);
        err = mlx4_en_start_port(dev);
        if (err)
                en_err(priv, "Failed starting port:%d\n", priv->port);

out:
        mutex_unlock(&mdev->state_lock);
        return err;
}

static int mlx4_en_close(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;

        en_dbg(IFDOWN, priv, "Close port called\n");

        mutex_lock(&mdev->state_lock);

        mlx4_en_stop_port(dev);
        netif_carrier_off(dev);

        mutex_unlock(&mdev->state_lock);
        return 0;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
        int i;

        for (i = 0; i < priv->tx_ring_num; i++) {
                if (priv->tx_ring[i].tx_info)
                        mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
                if (priv->tx_cq[i].buf)
                        mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
        }

        for (i = 0; i < priv->rx_ring_num; i++) {
                if (priv->rx_ring[i].rx_info)
                        mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
                if (priv->rx_cq[i].buf)
                        mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
        }
}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
        struct mlx4_en_port_profile *prof = priv->prof;
        int i;

        /* Create tx Rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
                                      prof->tx_ring_size, i, TX))
                        goto err;

                if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
                                           prof->tx_ring_size, TXBB_SIZE))
                        goto err;
        }

        /* Create rx Rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
                if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
                                      prof->rx_ring_size, i, RX))
                        goto err;

                if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
                                           prof->rx_ring_size, priv->stride))
                        goto err;
        }

        return 0;

err:
        en_err(priv, "Failed to allocate NIC resources\n");
        return -ENOMEM;
}

void mlx4_en_destroy_netdev(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;

        en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

        /* Unregister device - this will close the port if it was up */
        if (priv->registered)
                unregister_netdev(dev);

        if (priv->allocated)
                mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

        cancel_delayed_work(&priv->stats_task);
        /* flush any pending task for this netdev */
        flush_workqueue(mdev->workqueue);

        /* Detach the netdev so tasks would not attempt to access it */
        mutex_lock(&mdev->state_lock);
        mdev->pndev[priv->port] = NULL;
        mutex_unlock(&mdev->state_lock);

        mlx4_en_free_resources(priv);
        free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err = 0;

        en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
               dev->mtu, new_mtu);

        if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
                en_err(priv, "Bad MTU size:%d.\n", new_mtu);
                return -EPERM;
        }
        dev->mtu = new_mtu;

        if (netif_running(dev)) {
                mutex_lock(&mdev->state_lock);
                if (!mdev->device_up) {
                        /* NIC is probably restarting - let watchdog task reset
                         * the port */
                        en_dbg(DRV, priv, "Change MTU called with card down!?\n");
                } else {
                        mlx4_en_stop_port(dev);
                        mlx4_en_set_default_moderation(priv);
                        err = mlx4_en_start_port(dev);
                        if (err) {
                                en_err(priv, "Failed restarting port:%d\n",
                                       priv->port);
                                queue_work(mdev->workqueue, &priv->watchdog_task);
                        }
                }
                mutex_unlock(&mdev->state_lock);
        }
        return 0;
}

static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_open               = mlx4_en_open,
        .ndo_stop               = mlx4_en_close,
        .ndo_start_xmit         = mlx4_en_xmit,
        .ndo_select_queue       = mlx4_en_select_queue,
        .ndo_get_stats          = mlx4_en_get_stats,
        .ndo_set_multicast_list = mlx4_en_set_multicast,
        .ndo_set_mac_address    = mlx4_en_set_mac,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = mlx4_en_change_mtu,
        .ndo_tx_timeout         = mlx4_en_tx_timeout,
        .ndo_vlan_rx_register   = mlx4_en_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = mlx4_en_netpoll,
#endif
};

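/*
 * mlx4_en_init_netdev() is invoked per port at probe time: it allocates
 * the multiqueue netdev, fills in the private data and work tasks above,
 * validates the burned-in MAC, allocates rings and CQs, and registers the
 * device. Failures funnel through mlx4_en_destroy_netdev() for cleanup.
 */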
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                        struct mlx4_en_port_profile *prof)
{
        struct net_device *dev;
        struct mlx4_en_priv *priv;
        int i;
        int err;

        dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
        if (dev == NULL) {
                mlx4_err(mdev, "Net device allocation failed\n");
                return -ENOMEM;
        }

        SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);

        /*
         * Initialize driver private data
         */

        priv = netdev_priv(dev);
        memset(priv, 0, sizeof(struct mlx4_en_priv));
        priv->dev = dev;
        priv->mdev = mdev;
        priv->prof = prof;
        priv->port = port;
        priv->port_up = false;
        priv->rx_csum = 1;
        priv->flags = prof->flags;
        priv->tx_ring_num = prof->tx_ring_num;
        priv->rx_ring_num = prof->rx_ring_num;
        priv->mc_list = NULL;
        priv->mac_index = -1;
        priv->msg_enable = MLX4_EN_MSG_LEVEL;
        spin_lock_init(&priv->stats_lock);
        INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
        INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
        INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
        INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
        INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);

        /* Query for default mac and max mtu */
        priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
        priv->mac = mdev->dev->caps.def_mac[priv->port];
        if (ILLEGAL_MAC(priv->mac)) {
                en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
                       priv->port, priv->mac);
                err = -EINVAL;
                goto out;
        }

        priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
                                          DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
        err = mlx4_en_alloc_resources(priv);
        if (err)
                goto out;

        /* Allocate page for receive rings */
        err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
                                 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
        if (err) {
                en_err(priv, "Failed to allocate page for rx qps\n");
                goto out;
        }
        priv->allocated = 1;

        /*
         * Initialize netdev entry points
         */
        dev->netdev_ops = &mlx4_netdev_ops;
        dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
        dev->real_num_tx_queues = MLX4_EN_NUM_TX_RINGS;

        SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

        /* Set default MAC */
        dev->addr_len = ETH_ALEN;
        for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[ETH_ALEN - 1 - i] =
                (u8) (priv->mac >> (8 * i));

        /*
         * Set driver features
         */
        dev->features |= NETIF_F_SG;
        dev->vlan_features |= NETIF_F_SG;
        dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        dev->features |= NETIF_F_HIGHDMA;
        dev->features |= NETIF_F_HW_VLAN_TX |
                         NETIF_F_HW_VLAN_RX |
                         NETIF_F_HW_VLAN_FILTER;
        if (mdev->profile.num_lro)
                dev->features |= NETIF_F_LRO;
        if (mdev->LSO_support) {
                dev->features |= NETIF_F_TSO;
                dev->features |= NETIF_F_TSO6;
                dev->vlan_features |= NETIF_F_TSO;
                dev->vlan_features |= NETIF_F_TSO6;
        }

        mdev->pndev[port] = dev;

        netif_carrier_off(dev);
        err = register_netdev(dev);
        if (err) {
                en_err(priv, "Netdev registration failed for port %d\n", port);
                goto out;
        }

        en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
        en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

        priv->registered = 1;
        queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
        return 0;

out:
        mlx4_en_destroy_netdev(dev);
        return err;
}