/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ipoib.h"

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

static int lro;
module_param(lro, bool, 0444);
MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");

static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
module_param(lro_max_aggr, int, 0644);
MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
		"(default = 64)");
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif
struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};
static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
};
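/*
 * An IPoIB hardware address is INFINIBAND_ALEN (20) bytes: a 4-byte
 * QPN followed by a 16-byte GID.  The array above is the well-known
 * IPv4 broadcast MGID; bytes 8 and 9 (the P_Key portion) are left
 * zero here and filled in per port in ipoib_add_port().
 */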
struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;
static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};
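/*
 * ipoib_client plugs this driver into the IB core: ipoib_add_one() is
 * invoked for each IB device and creates one network interface per
 * port, and ipoib_remove_one() tears those interfaces down when the
 * device goes away.
 */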
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	napi_enable(&priv->napi);
	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev)) {
		napi_disable(&priv->napi);
		return -EINVAL;
	}

	if (ipoib_ib_dev_up(dev)) {
		ipoib_ib_dev_stop(dev, 1);
		napi_disable(&priv->napi);
		return -EINVAL;
	}

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;
}
static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
	napi_disable(&priv->napi);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev, 0);
	ipoib_ib_dev_stop(dev, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}
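/*
 * Worked example (assuming IPOIB_ENCAP_LEN is 4, as in ipoib.h): in
 * datagram mode on a port with a 2048-byte IB MTU, IPOIB_UD_MTU()
 * allows at most 2048 - 4 = 2044 bytes, and the effective dev->mtu is
 * further capped by the multicast group MTU once the broadcast group
 * has been joined.
 */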
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}
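/*
 * Paths live both on priv->path_list and in the priv->path_tree
 * rb-tree keyed by destination GID, so the transmit-time lookup in
 * __path_find() is O(log n) while teardown and iteration walk the
 * list.
 */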
static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}
static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that path->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);

		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID " IPOIB_GID_FMT " invalid\n",
			be16_to_cpu(path->pathrec.dlid),
			IPOIB_GID_ARG(path->pathrec.dgid));
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}
void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		wait_for_completion(&path->done);
		path_free(dev, path);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}
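/*
 * Note the lock juggling in ipoib_flush_paths() above:
 * wait_for_completion() may sleep, so both locks are dropped around
 * it.  This is safe because the paths have already been spliced onto
 * the private remove_list and unlinked from the rb-tree, so no other
 * context can reach them.
 */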
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
			  be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
	else
		ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
			  status, IPOIB_GID_ARG(path->pathrec.dgid));

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (ah) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;
			memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
			       sizeof(union ib_gid));

			if (ipoib_cm_enabled(dev, neigh->neighbour)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					list_del(&neigh->list);
					if (neigh->ah)
						ipoib_put_ah(neigh->ah);
					ipoib_neigh_free(dev, neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}
static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}
static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
		  IPOIB_GID_ARG(path->pathrec.dgid));

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		return path->query_id;
	}

	return 0;
}
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;

	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
	if (!neigh) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, skb->dst->neighbour->ha + 4);
	if (!path) {
		path = path_rec_create(dev, skb->dst->neighbour->ha + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;
		memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
		       sizeof(union ib_gid));

		if (ipoib_cm_enabled(dev, neigh->neighbour)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				__skb_queue_tail(&neigh->queue, skb);
			else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
	} else {
		neigh->ah  = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_list;

		__skb_queue_tail(&neigh->queue, skb);
	}

	spin_unlock(&priv->lock);
	return;

err_list:
	list_del(&neigh->list);

err_path:
	ipoib_neigh_free(dev, neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock(&priv->lock);
}
static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

	/* Look up path record for unicasts */
	if (skb->dst->neighbour->ha[4] != 0xff) {
		neigh_add_path(skb, dev);
		return;
	}

	/* Add in the P_Key for multicasts */
	skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
	skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
	ipoib_mcast_send(dev, skb->dst->neighbour->ha + 4, skb);
}
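/*
 * Byte 4 of the hardware address is the first byte of the GID, and
 * 0xff there marks a multicast GID; anything else is resolved through
 * the unicast path-record machinery in neigh_add_path().
 */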
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path || !path->valid) {
		if (!path)
			path = path_rec_create(dev, phdr->hwaddr + 4);
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (path_rec_start(dev, path)) {
				spin_unlock(&priv->lock);
				path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock(&priv->lock);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock(&priv->lock);
}
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
		return NETDEV_TX_LOCKED;

	if (likely(skb->dst && skb->dst->neighbour)) {
		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
			ipoib_path_lookup(skb, dev);
			goto out;
		}

		neigh = *to_ipoib_neigh(skb->dst->neighbour);

		if (unlikely((memcmp(&neigh->dgid.raw,
				     skb->dst->neighbour->ha + 4,
				     sizeof(union ib_gid))) ||
			     (neigh->dev != dev))) {
			spin_lock(&priv->lock);
			/*
			 * It's safe to call ipoib_put_ah() inside
			 * priv->lock here, because we know that
			 * path->ah will always hold one more reference,
			 * so ipoib_put_ah() will never do more than
			 * decrement the ref count.
			 */
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			list_del(&neigh->list);
			ipoib_neigh_free(dev, neigh);
			spin_unlock(&priv->lock);
			ipoib_path_lookup(skb, dev);
			goto out;
		}

		if (ipoib_cm_get(neigh)) {
			if (ipoib_cm_up(neigh)) {
				ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
				goto out;
			}
		} else if (neigh->ah) {
			ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
			goto out;
		}

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock(&priv->lock);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock(&priv->lock);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast*/
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
		} else {
			/* unicast GID -- should be ARP or RARP reply */

			if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
			    (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
					   IPOIB_GID_FMT "\n",
					   skb->dst ? "neigh" : "dst",
					   be16_to_cpup((__be16 *) skb->data),
					   IPOIB_QPN(phdr->hwaddr),
					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
				dev_kfree_skb_any(skb);
				++dev->stats.tx_dropped;
				goto out;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

out:
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}
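/*
 * To summarize ipoib_start_xmit() above: a packet with a neighbour
 * goes out on the connected-mode QP if one is up, otherwise on the UD
 * address handle if the path is resolved, otherwise it waits on the
 * neighbour queue behind the pending path query.  A packet without a
 * neighbour carries a pseudoheader and is dispatched by GID:
 * multicast directly, unicast only for ARP/RARP replies.
 */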
static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}
static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * If we don't have a neighbour structure, stuff the
	 * destination address onto the front of the skb so we can
	 * figure out where to send the packet later.
	 */
	if ((!skb->dst || !skb->dst->neighbour) && daddr) {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
	}

	return 0;
}
static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(ipoib_workqueue, &priv->restart_task);
}
static void ipoib_neigh_cleanup(struct neighbour *n)
{
	struct ipoib_neigh *neigh;
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;
	struct ipoib_ah *ah = NULL;

	neigh = *to_ipoib_neigh(n);
	if (neigh)
		priv = netdev_priv(neigh->dev);
	else
		return;
	ipoib_dbg(priv,
		  "neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
		  IPOIB_QPN(n->ha),
		  IPOIB_GID_RAW_ARG(n->ha + 4));

	spin_lock_irqsave(&priv->lock, flags);

	if (neigh->ah)
		ah = neigh->ah;
	list_del(&neigh->list);
	ipoib_neigh_free(n->dev, neigh);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ah)
		ipoib_put_ah(ah);
}
struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
				      struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->neighbour = neighbour;
	neigh->dev = dev;
	*to_ipoib_neigh(neighbour) = neigh;
	skb_queue_head_init(&neigh->queue);
	ipoib_cm_set(neigh, NULL);

	return neigh;
}
void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
	struct sk_buff *skb;

	*to_ipoib_neigh(neigh->neighbour) = NULL;
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	kfree(neigh);
}
static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_cleanup = ipoib_neigh_cleanup;

	return 0;
}
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring =	kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}
void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		unregister_netdev(cpriv->dev);
		ipoib_dev_cleanup(cpriv->dev);
		free_netdev(cpriv->dev);
	}

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}
static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	unsigned int ip_len;
	struct iphdr *iph;

	if (unlikely(skb->protocol != htons(ETH_P_IP)))
		return -1;

	/*
	 * In the future we may add an else clause that verifies the
	 * checksum and allows devices which do not calculate checksum
	 * to use LRO.
	 */
	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
		return -1;

	/* Check for non-TCP packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if IP header and TCP header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}
static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
{
	priv->lro.lro_mgr.max_aggr	 = lro_max_aggr;
	priv->lro.lro_mgr.max_desc	 = IPOIB_MAX_LRO_DESCRIPTORS;
	priv->lro.lro_mgr.lro_arr	 = priv->lro.lro_desc;
	priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
	priv->lro.lro_mgr.features	 = LRO_F_NAPI;
	priv->lro.lro_mgr.dev		 = priv->dev;
	priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
}
static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->open		 = ipoib_open;
	dev->stop		 = ipoib_stop;
	dev->change_mtu		 = ipoib_change_mtu;
	dev->hard_start_xmit	 = ipoib_start_xmit;
	dev->tx_timeout		 = ipoib_timeout;
	dev->header_ops		 = &ipoib_header_ops;
	dev->set_multicast_list	 = ipoib_set_mcast_list;
	dev->neigh_setup	 = ipoib_neigh_setup_dev;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len	 = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_HIGHDMA);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	priv->dev = dev;

	ipoib_lro_setup(priv);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	mutex_init(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}
struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}
static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}
static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
				"by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}
static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}
static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_device_attr *device_attr;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		goto device_init_failed;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		goto device_init_failed;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	}

	if (lro)
		priv->dev->features |= NETIF_F_LRO;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}
	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
		priv->dev->features |= NETIF_F_TSO;

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_workqueue(ipoib_workqueue);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}
static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}
static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		flush_workqueue(ipoib_workqueue);

		unregister_netdev(priv->dev);
		ipoib_dev_cleanup(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}
static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
						     IPOIB_MIN_QUEUE_SIZE));
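	/*
	 * Worked example (assuming IPOIB_MIN_QUEUE_SIZE is 2 and
	 * IPOIB_MAX_QUEUE_SIZE is 8192, as in ipoib.h):
	 * send_queue_size=100 is rounded up to 128, while an oversized
	 * value such as 100000 is clamped down to 8192; the send queue
	 * is also never allowed below 2 * MAX_SEND_CQE.
	 */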
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}
static void __exit ipoib_cleanup_module(void)
{
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);