/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */
#include <linux/ip.h>
#include <linux/in.h>

#include <linux/jhash.h>
#include <net/arp.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>

#define DRV_VERSION "1.0.0"

const char ipoib_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data);
static int ipoib_set_mac(struct net_device *dev, void *addr);
static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
		       int cmd);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one,
	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int ipoib_netdev_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct netdev_notifier_info *ni = ptr;
	struct net_device *dev = ni->dev;

	if (dev->netdev_ops->ndo_open != ipoib_open)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		ipoib_create_debug_files(dev);
		break;
	case NETDEV_CHANGENAME:
		ipoib_delete_debug_files(dev);
		ipoib_create_debug_files(dev);
		break;
	case NETDEV_UNREGISTER:
		ipoib_delete_debug_files(dev);
		break;
	}

	return NOTIFY_DONE;
}
#endif

int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	netif_carrier_off(dev);

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	priv->sm_fullmember_sendonly_support = false;

	if (ipoib_ib_dev_open(dev)) {
		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
			return 0;
		goto err_disable;
	}

	ipoib_ib_dev_up(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	netif_start_queue(dev);

	return 0;

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	return 0;
}

static void ipoib_uninit(struct net_device *dev)
{
	ipoib_dev_cleanup(dev);
}

static netdev_features_t ipoib_fix_features(struct net_device *dev,
					    netdev_features_t features)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	return features;
}

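/*
 * Change the MTU of an IPoIB interface. In connected mode the MTU may
 * exceed the multicast (datagram) MTU, at the cost of multicast packet
 * drops; in datagram mode the MTU is clamped to the UD limit derived
 * from the underlying IB MTU.
 */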
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret = 0;

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	if (priv->mcast_mtu < priv->admin_mtu)
		ipoib_dbg(priv, "MTU must be smaller than the underlying "
				"link layer MTU - 4 (%u)\n", priv->mcast_mtu);

	new_mtu = min(priv->mcast_mtu, priv->admin_mtu);

	if (priv->rn_ops->ndo_change_mtu) {
		bool carrier_status = netif_carrier_ok(dev);

		netif_carrier_off(dev);

		/* notify lower level on the real mtu */
		ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);

		if (carrier_status)
			netif_carrier_on(dev);
	} else {
		dev->mtu = new_mtu;
	}

	return ret;
}

static void ipoib_get_stats(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (priv->rn_ops->ndo_get_stats64)
		priv->rn_ops->ndo_get_stats64(dev, stats);
	else
		netdev_stats_to_stats64(stats, &dev->stats);
}

/* Called with an RCU read lock taken */
static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
					struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct in_device *in_dev;
	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
	__be32 ret_addr;

	switch (addr->sa_family) {
	case AF_INET:
		in_dev = in_dev_get(dev);
		if (!in_dev)
			return false;

		ret_addr = inet_confirm_addr(net, in_dev, 0,
					     addr_in->sin_addr.s_addr,
					     RT_SCOPE_HOST);
		in_dev_put(in_dev);
		if (ret_addr)
			return true;

		break;
	case AF_INET6:
		if (IS_ENABLED(CONFIG_IPV6) &&
		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
			return true;

		break;
	}
	return false;
}

/**
 * Find the master net_device on top of the given net_device.
 * @dev: base IPoIB net_device
 *
 * Returns the master net_device with a reference held, or the same net_device
 * if no master exists.
 */
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
	struct net_device *master;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (master)
		dev_hold(master);
	rcu_read_unlock();

	if (master)
		return master;

	dev_hold(dev);
	return dev;
}

struct ipoib_walk_data {
	const struct sockaddr *addr;
	struct net_device *result;
};

static int ipoib_upper_walk(struct net_device *upper, void *_data)
{
	struct ipoib_walk_data *data = _data;
	int ret = 0;

	if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
		dev_hold(upper);
		data->result = upper;
		ret = 1;
	}

	return ret;
}

/**
 * Find a net_device matching the given address, which is an upper device of
 * the given net_device.
 * @addr: IP address to look for.
 * @dev: base IPoIB net_device
 *
 * If found, returns the net_device with a reference held. Otherwise return
 * NULL.
 */
static struct net_device *ipoib_get_net_dev_match_addr(
		const struct sockaddr *addr, struct net_device *dev)
{
	struct ipoib_walk_data data = {
		.addr = addr,
	};

	rcu_read_lock();
	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
		dev_hold(dev);
		data.result = dev;
		goto out;
	}

	netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data);
out:
	rcu_read_unlock();
	return data.result;
}

/* returns the number of IPoIB netdevs on top of a given ipoib device
 * matching a pkey_index and address, if one exists.
 *
 * @found_net_dev: contains a matching net_device if the return value >= 1,
 * with a reference held. */
static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
				     const union ib_gid *gid,
				     u16 pkey_index,
				     const struct sockaddr *addr,
				     int nesting,
				     struct net_device **found_net_dev)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *net_dev = NULL;
	int matches = 0;

	if (priv->pkey_index == pkey_index &&
	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
		if (!addr) {
			net_dev = ipoib_get_master_net_dev(priv->dev);
		} else {
			/* Verify the net_device matches the IP address, as
			 * IPoIB child devices currently share a GID. */
			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
		}
		if (net_dev) {
			if (!*found_net_dev)
				*found_net_dev = net_dev;
			else
				dev_put(net_dev);
			++matches;
		}
	}

	/* Check child interfaces */
	down_read_nested(&priv->vlan_rwsem, nesting);
	list_for_each_entry(child_priv, &priv->child_intfs, list) {
		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
						     pkey_index, addr,
						     nesting + 1,
						     found_net_dev);
		if (matches > 1)
			break;
	}
	up_read(&priv->vlan_rwsem);

	return matches;
}

/* Returns the number of matching net_devs found (between 0 and 2). Also
 * return the matching net_device in the @net_dev parameter, holding a
 * reference to the net_device, if the number of matches >= 1 */
static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
					 u16 pkey_index,
					 const union ib_gid *gid,
					 const struct sockaddr *addr,
					 struct net_device **net_dev)
{
	struct ipoib_dev_priv *priv;
	int matches = 0;

	*net_dev = NULL;

	list_for_each_entry(priv, dev_list, list) {
		if (priv->port != port)
			continue;

		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
						     addr, 0, net_dev);
		if (matches > 1)
			break;
	}

	return matches;
}

static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data)
{
	struct net_device *net_dev;
	struct list_head *dev_list = client_data;
	u16 pkey_index;
	int matches;
	int ret;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
	if (ret)
		return NULL;

	if (!dev_list)
		return NULL;

	/* See if we can find a unique device matching the L2 parameters */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, NULL, &net_dev);

	switch (matches) {
	case 0:
		return NULL;
	case 1:
		return net_dev;
	}

	dev_put(net_dev);

	/* Couldn't find a unique device with L2 parameters only. Use L3
	 * address to uniquely match the net device */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, addr, &net_dev);
	switch (matches) {
	case 0:
		return NULL;
	default:
		dev_warn_ratelimited(&dev->dev,
				     "duplicate IP address detected\n");
		/* Fall through */
	case 1:
		return net_dev;
	}
}

int ipoib_set_mode(struct net_device *dev, const char *buf)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
	     !strcmp(buf, "connected\n")) ||
	    (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
	     !strcmp(buf, "datagram\n"))) {
		return 0;
	}

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		netdev_update_features(dev);
		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
		rtnl_unlock();
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		return (!rtnl_trylock()) ? -EBUSY : 0;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		netdev_update_features(dev);
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		rtnl_unlock();
		ipoib_flush_paths(dev);
		return (!rtnl_trylock()) ? -EBUSY : 0;
	}

	return -EINVAL;
}

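/*
 * Cached path records live in a per-device red-black tree
 * (priv->path_tree), keyed by destination GID, and are mirrored in
 * priv->path_list for iteration. Both structures are protected by
 * priv->lock.
 */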
struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	ipoib_dbg(ipoib_priv(dev), "path_free\n");

	/* remove all neigh connected to this path */
	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
			  be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
			  path->pathrec.dgid.raw);
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

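/*
 * The pseudo header carries the destination hardware address from
 * ipoib_hard_header() to ipoib_start_xmit(), which pulls it back off
 * before handing the packet to the send path.
 */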
static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
{
	struct ipoib_pseudo_header *phdr;

	phdr = skb_push(skb, sizeof(*phdr));
	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
}

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

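/*
 * SA path record query completion. On success, build an address handle
 * from the returned path, hand it to every neighbour waiting on this
 * path, and retransmit any packets that were queued while the query was
 * outstanding.
 */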
static void path_rec_completion(int status,
				struct sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be32_to_cpu(sa_path_get_dlid(pathrec)),
			  pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct rdma_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (!IS_ERR_OR_NULL(ah)) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
			  pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			if (ipoib_cm_enabled(dev, neigh->daddr)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					ipoib_neigh_free(neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (IS_ERR_OR_NULL(ah))
		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		int ret;

		skb->dev = dev;
		ret = dev_queue_xmit(skb);
		if (ret)
			ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
				   __func__, ret);
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	if (rdma_cap_opa_ah(priv->ca, priv->port))
		path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

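/*
 * Slow path for unicast transmit: allocate a neighbour entry, attach it
 * to the path record for its destination GID (starting an SA query if no
 * cached path exists), and queue the skb until the path resolves.
 */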
static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
			   struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	neigh = ipoib_neigh_alloc(daddr, dev);
	if (!neigh) {
		spin_unlock_irqrestore(&priv->lock, flags);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	path = __path_find(dev, daddr + 4);
	if (!path) {
		path = path_rec_create(dev, daddr + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		if (ipoib_cm_enabled(dev, neigh->daddr)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				ipoib_neigh_free(neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) <
			    IPOIB_MAX_PATH_REC_QUEUE) {
				push_pseudo_header(skb, neigh->daddr);
				__skb_queue_tail(&neigh->queue, skb);
			} else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			path->ah->last_send = rn->send(dev, skb, path->ah->ah,
						       IPOIB_QPN(daddr));
			ipoib_neigh_put(neigh);
			return;
		}
	} else {
		neigh->ah  = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_path;
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			push_pseudo_header(skb, neigh->daddr);
			__skb_queue_tail(&neigh->queue, skb);
		} else {
			goto err_drop;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
	return;

err_path:
	ipoib_neigh_free(neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudo_header *phdr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(dev, phdr->hwaddr + 4);
			new_path = 1;
		}
		if (path) {
			if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
				push_pseudo_header(skb, phdr->hwaddr);
				__skb_queue_tail(&path->queue, skb);
			} else {
				++dev->stats.tx_dropped;
				dev_kfree_skb_any(skb);
			}

			if (!path->query && path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				if (new_path)
					path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %08x\n",
			  be32_to_cpu(sa_path_get_dlid(&path->pathrec)));

		spin_unlock_irqrestore(&priv->lock, flags);
		path->ah->last_send = rn->send(dev, skb, path->ah->ah,
					       IPOIB_QPN(phdr->hwaddr));
		return;
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		push_pseudo_header(skb, phdr->hwaddr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	struct ipoib_pseudo_header *phdr;
	struct ipoib_header *header;
	unsigned long flags;

	phdr = (struct ipoib_pseudo_header *) skb->data;
	skb_pull(skb, sizeof(*phdr));
	header = (struct ipoib_header *) skb->data;

	if (unlikely(phdr->hwaddr[4] == 0xff)) {
		/* multicast, arrange "if" according to probability */
		if ((header->proto != htons(ETH_P_IP)) &&
		    (header->proto != htons(ETH_P_IPV6)) &&
		    (header->proto != htons(ETH_P_ARP)) &&
		    (header->proto != htons(ETH_P_RARP)) &&
		    (header->proto != htons(ETH_P_TIPC))) {
			/* ethertype not supported by IPoIB */
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		/* Add in the P_Key for multicast */
		phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		phdr->hwaddr[9] = priv->pkey & 0xff;

		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
		if (likely(neigh))
			goto send_using_neigh;
		ipoib_mcast_send(dev, phdr->hwaddr, skb);
		return NETDEV_TX_OK;
	}

	/* unicast, arrange "switch" according to probability */
	switch (header->proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_TIPC):
		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
		if (unlikely(!neigh)) {
			neigh_add_path(skb, phdr->hwaddr, dev);
			return NETDEV_TX_OK;
		}
		break;
	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		/* for unicast ARP and RARP should always perform path find */
		unicast_arp_send(skb, dev, phdr);
		return NETDEV_TX_OK;
	default:
		/* ethertype not supported by IPoIB */
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

send_using_neigh:
	/* note we now hold a ref to neigh */
	if (ipoib_cm_get(neigh)) {
		if (ipoib_cm_up(neigh)) {
			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
			goto unref;
		}
	} else if (neigh->ah) {
		neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
						IPOIB_QPN(phdr->hwaddr));
		goto unref;
	}

	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		push_pseudo_header(skb, phdr->hwaddr);
		spin_lock_irqsave(&priv->lock, flags);
		__skb_queue_tail(&neigh->queue, skb);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

unref:
	ipoib_neigh_put(neigh);

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * we don't rely on dst_entry structure, always stuff the
	 * destination address into skb hard header so we can figure out where
	 * to send the packet later.
	 */
	push_pseudo_header(skb, daddr);

	return IPOIB_HARD_LEN;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(priv->wq, &priv->restart_task);
}

static int ipoib_get_iflink(const struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	/* parent interface */
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
		return dev->ifindex;

	/* child/vlan interface */
	return priv->parent->ifindex;
}

static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used as one can not connect to the
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
	/* qpn octets[1:4) & port GUID octets[12:20) */
	u32 *d32 = (u32 *) daddr;
	u32 hv;

	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
	return hv & htbl->mask;
}

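/*
 * Neighbour lookup runs under rcu_read_lock_bh() only; writers serialize
 * on priv->lock. The reference is taken with atomic_inc_not_zero() so a
 * concurrently deleted entry is never handed out.
 */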
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash_val;

	rcu_read_lock_bh();

	htbl = rcu_dereference_bh(ntbl->htbl);

	if (!htbl)
		goto out_unlock;

	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
	     neigh != NULL;
	     neigh = rcu_dereference_bh(neigh->hnext)) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				goto out_unlock;
			}

			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
				neigh->alive = jiffies;
			goto out_unlock;
		}
	}

out_unlock:
	rcu_read_unlock_bh();
	return neigh;
}

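/*
 * Garbage-collect neighbour entries that have been idle for two ARP GC
 * periods. Runs from the per-device workqueue and is rescheduled by
 * ipoib_reap_neigh() until IPOIB_STOP_NEIGH_GC is set.
 */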
static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long neigh_obsolete;
	unsigned long dt;
	unsigned long flags;
	int i;
	LIST_HEAD(remove_list);

	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		return;

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	/* neigh is obsolete if it was idle for two GC periods */
	dt = 2 * arp_tbl.gc_interval;
	neigh_obsolete = jiffies - dt;
	/* handle possible race condition */
	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* was the neigh idle for two GC periods */
			if (time_after(neigh_obsolete, neigh->alive)) {
				ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);

				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from path/mc list */
				list_del_init(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}

		}
	}

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_mcast_remove_list(&remove_list);
}

static void ipoib_reap_neigh(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);

	__ipoib_reap_neigh(priv);

	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		queue_delayed_work(priv->wq, &priv->neigh_reap_task,
				   arp_tbl.gc_interval);
}

static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
					    struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->dev = dev;
	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
	skb_queue_head_init(&neigh->queue);
	INIT_LIST_HEAD(&neigh->list);
	ipoib_cm_set(neigh, NULL);
	/* one ref on behalf of the caller */
	atomic_set(&neigh->refcnt, 1);

	return neigh;
}

struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl) {
		neigh = NULL;
		goto out_unlock;
	}

	/* need to add a new neigh, but maybe some other thread succeeded?
	 * recalc hash, maybe hash resize took place so we do a search
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	atomic_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:

	return neigh;
}

void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sk_buff *skb;

	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(ipoib_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}

static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);

	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock));
	     n != NULL;
	     n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock))) {
		if (n == neigh) {
			/* found */
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from parent list */
			list_del_init(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			return;
		} else {
			np = &n->hnext;
		}
	}
}

static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **buckets;
	u32 size;

	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
	ntbl->htbl = NULL;
	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return -ENOMEM;
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
	buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
	if (!buckets) {
		kfree(htbl);
		return -ENOMEM;
	}
	htbl->size = size;
	htbl->mask = (size - 1);
	htbl->buckets = buckets;
	RCU_INIT_POINTER(ntbl->htbl, htbl);
	htbl->ntbl = ntbl;
	atomic_set(&ntbl->entries, 0);

	/* start garbage collection */
	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);

	return 0;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct ipoib_neigh_hash *htbl = container_of(head,
						     struct ipoib_neigh_hash,
						     rcu);
	struct ipoib_neigh __rcu **buckets = htbl->buckets;
	struct ipoib_neigh_table *ntbl = htbl->ntbl;

	kfree(buckets);
	kfree(htbl);
	complete(&ntbl->deleted);
}

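/*
 * Unlink every neighbour whose destination GID matches; the entries are
 * freed later via call_rcu() once concurrent readers are done with them.
 */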
void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i;

	/* remove all neigh connected to a given path or mcast */
	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belonging to this parent */
			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from parent list */
				list_del_init(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}

		}
	}
out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i, wait_flushed = 0;

	init_completion(&priv->ntbl.flushed);
	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	wait_flushed = atomic_read(&priv->ntbl.entries);
	if (!wait_flushed)
		goto free_htbl;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from path/mc list */
			list_del_init(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
		}
	}

free_htbl:
	rcu_assign_pointer(ntbl->htbl, NULL);
	call_rcu(&htbl->rcu, neigh_hash_free_rcu);

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	if (wait_flushed)
		wait_for_completion(&priv->ntbl.flushed);
}

static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int stopped;

	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
	init_completion(&priv->ntbl.deleted);

	/* Stop GC; if called after a failed init we need to cancel the work */
	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	if (!stopped)
		cancel_delayed_work(&priv->neigh_reap_task);

	ipoib_flush_neighs(priv);

	wait_for_completion(&priv->ntbl.deleted);
}

static void ipoib_napi_add(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC);
	netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE);
}

static void ipoib_napi_del(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	netif_napi_del(&priv->recv_napi);
	netif_napi_del(&priv->send_napi);
}

static void ipoib_dev_uninit_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_transport_dev_cleanup(dev);

	ipoib_napi_del(dev);

	ipoib_cm_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static int ipoib_dev_init_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_napi_add(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring)
		goto out;

	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       priv->ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_transport_dev_init(dev, priv->ca)) {
		pr_warn("%s: ipoib_transport_dev_init failed\n",
			priv->ca->name);
		goto out_tx_ring_cleanup;
	}

	/* after qp created set dev address */
	priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
	priv->dev->dev_addr[2] = (priv->qp->qp_num >>  8) & 0xff;
	priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff;

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	ipoib_napi_del(dev);
	return -ENOMEM;
}

static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
		       int cmd)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (!priv->rn_ops->ndo_do_ioctl)
		return -EOPNOTSUPP;

	return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd);
}

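/*
 * One-time device bring-up: allocate the per-device ordered workqueue and
 * PD, let the rdma_netdev layer create its HW resources, and initialize
 * the neighbour hash table before optionally opening the device.
 */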
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret = -ENOMEM;

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	/*
	 * the various IPoIB tasks assume they will never race against
	 * themselves, so always use a single-threaded workqueue
	 */
	priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
	if (!priv->wq) {
		pr_warn("%s: failed to allocate device WQ\n", dev->name);
		goto out;
	}

	/* create PD, which is used for both control and datapath */
	priv->pd = ib_alloc_pd(priv->ca, 0);
	if (IS_ERR(priv->pd)) {
		pr_warn("%s: failed to allocate PD\n", ca->name);
		goto clean_wq;
	}

	ret = priv->rn_ops->ndo_init(dev);
	if (ret) {
		pr_warn("%s failed to init HW resource\n", dev->name);
		goto out_free_pd;
	}

	if (ipoib_neigh_hash_init(priv) < 0) {
		pr_warn("%s failed to init neigh hash\n", dev->name);
		goto out_dev_uninit;
	}

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			pr_warn("%s failed to open device\n", dev->name);
			ret = -ENODEV;
			goto out_dev_uninit;
		}
	}

	return 0;

out_dev_uninit:
	ipoib_ib_dev_cleanup(dev);

out_free_pd:
	if (priv->pd) {
		ib_dealloc_pd(priv->pd);
		priv->pd = NULL;
	}

clean_wq:
	if (priv->wq) {
		destroy_workqueue(priv->wq);
		priv->wq = NULL;
	}

out:
	return ret;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev), *cpriv, *tcpriv;
	LIST_HEAD(head);

	ASSERT_RTNL();

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		/* Stop GC on child */
		set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
		cancel_delayed_work(&cpriv->neigh_reap_task);
		unregister_netdevice_queue(cpriv->dev, &head);
	}
	unregister_netdevice_many(&head);

	ipoib_neigh_hash_uninit(dev);

	ipoib_ib_dev_cleanup(dev);

	/* no more works over the priv->wq */
	if (priv->wq) {
		flush_workqueue(priv->wq);
		destroy_workqueue(priv->wq);
		priv->wq = NULL;
	}
}

static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
}

static int ipoib_get_vf_config(struct net_device *dev, int vf,
			       struct ifla_vf_info *ivf)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int err;

	err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
	if (err)
		return err;

	ivf->vf = vf;

	return 0;
}

static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
		return -EINVAL;

	return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
}

static int ipoib_get_vf_stats(struct net_device *dev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
}

static const struct header_ops ipoib_header_ops = {
	.create = ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops_pf = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
	.ndo_set_vf_link_state	 = ipoib_set_vf_link_state,
	.ndo_get_vf_config	 = ipoib_get_vf_config,
	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
	.ndo_set_mac_address	 = ipoib_set_mac,
	.ndo_get_stats64	 = ipoib_get_stats,
	.ndo_do_ioctl		 = ipoib_ioctl,
};

static const struct net_device_ops ipoib_netdev_ops_vf = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
	.ndo_get_stats64	 = ipoib_get_stats,
	.ndo_do_ioctl		 = ipoib_ioctl,
};

void ipoib_setup_common(struct net_device *dev)
{
	dev->header_ops		 = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	dev->hard_header_len	 = IPOIB_HARD_LEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_HIGHDMA);
	netif_keep_dst(dev);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
}

static void ipoib_build_priv(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	priv->dev = dev;
	spin_lock_init(&priv->lock);
	init_rwsem(&priv->vlan_rwsem);
	mutex_init(&priv->mcast_mutex);
	mutex_init(&priv->sysfs_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}

static const struct net_device_ops ipoib_netdev_default_pf = {
	.ndo_init		 = ipoib_dev_init_default,
	.ndo_uninit		 = ipoib_dev_uninit_default,
	.ndo_open		 = ipoib_ib_dev_open_default,
	.ndo_stop		 = ipoib_ib_dev_stop_default,
};

static struct net_device
*ipoib_create_netdev_default(struct ib_device *hca,
			     const char *name,
			     unsigned char name_assign_type,
			     void (*setup)(struct net_device *))
{
	struct net_device *dev;
	struct rdma_netdev *rn;

	dev = alloc_netdev((int)sizeof(struct rdma_netdev),
			   name,
			   name_assign_type, setup);
	if (!dev)
		return NULL;

	rn = netdev_priv(dev);

	rn->send = ipoib_send;
	rn->attach_mcast = ipoib_mcast_attach;
	rn->detach_mcast = ipoib_mcast_detach;
	rn->free_rdma_netdev = free_netdev;
	rn->hca = hca;

	dev->netdev_ops = &ipoib_netdev_default_pf;

	return dev;
}

static struct net_device *ipoib_get_netdev(struct ib_device *hca, u8 port,
					   const char *name)
{
	struct net_device *dev;

	if (hca->alloc_rdma_netdev) {
		dev = hca->alloc_rdma_netdev(hca, port,
					     RDMA_NETDEV_IPOIB, name,
					     NET_NAME_UNKNOWN,
					     ipoib_setup_common);
		if (IS_ERR_OR_NULL(dev) && PTR_ERR(dev) != -EOPNOTSUPP)
			return NULL;
	}

	if (!hca->alloc_rdma_netdev || PTR_ERR(dev) == -EOPNOTSUPP)
		dev = ipoib_create_netdev_default(hca, name, NET_NAME_UNKNOWN,
						  ipoib_setup_common);

	return dev;
}

struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
					const char *name)
{
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	struct rdma_netdev *rn;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;

	dev = ipoib_get_netdev(hca, port, name);
	if (!dev)
		goto free_priv;

	priv->rn_ops = dev->netdev_ops;

	/* fixme : should be after the query_cap */
	if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION)
		dev->netdev_ops	= &ipoib_netdev_ops_vf;
	else
		dev->netdev_ops	= &ipoib_netdev_ops_pf;

	rn = netdev_priv(dev);
	rn->clnt_priv = priv;
	ipoib_build_priv(dev);

	return priv;
free_priv:
	kfree(priv);
	return NULL;
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
				"by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	ipoib_set_umcast(to_net_dev(dev), umcast_val);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *netdev = priv->dev;

	netif_addr_lock_bh(netdev);

	memcpy(&priv->local_gid.global.interface_id,
	       &gid->global.interface_id,
	       sizeof(gid->global.interface_id));
	memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
	clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	netif_addr_unlock_bh(netdev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(child_priv, &priv->child_intfs, list)
			set_base_guid(child_priv, gid);
		up_read(&priv->vlan_rwsem);
	}
}

static int ipoib_check_lladdr(struct net_device *dev,
			      struct sockaddr_storage *ss)
{
	union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
	int ret = 0;

	netif_addr_lock_bh(dev);

	/* Make sure the QPN, reserved and subnet prefix match the current
	 * lladdr; this also makes sure the lladdr is unicast.
	 */
	if (memcmp(dev->dev_addr, ss->__data,
		   4 + sizeof(gid->global.subnet_prefix)) ||
	    gid->global.interface_id == 0)
		ret = -EINVAL;

	netif_addr_unlock_bh(dev);

	return ret;
}

static int ipoib_set_mac(struct net_device *dev, void *addr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sockaddr_storage *ss = addr;
	int ret;

	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
		return -EBUSY;

	ret = ipoib_check_lladdr(dev, ss);
	if (ret)
		return ret;

	set_base_guid(priv, (union ib_gid *)(ss->__data + 4));

	queue_work(ipoib_workqueue, &priv->flush_light);

	return 0;
}

static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

void ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	priv->hca_caps = hca->attrs.device_cap_flags;

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (priv->hca_caps & IB_DEVICE_UD_TSO)
			priv->dev->hw_features |= NETIF_F_TSO;

		priv->dev->features |= priv->dev->hw_features;
	}
}

static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	struct rdma_netdev *rn;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(hca, port, format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dev.parent);
	priv->dev->dev_id = port - 1;

	result = ib_query_port(hca, port, &attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
	priv->dev->max_mtu = IPOIB_CM_MTU;

	priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	ipoib_set_dev_features(priv, hca);

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
	       sizeof(union ib_gid));
	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	ib_register_event_handler(&priv->event_handler);

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	result = -ENOMEM;
	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_workqueue(ipoib_workqueue);
	/* Stop GC if started before flush */
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	cancel_delayed_work(&priv->neigh_reap_task);
	flush_workqueue(priv->wq);
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	rn = netdev_priv(priv->dev);
	rn->free_rdma_netdev(priv->dev);
	kfree(priv);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int p;
	int count = 0;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		if (!rdma_protocol_ib(device, p))
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = ipoib_priv(dev);
			list_add_tail(&priv->list, dev_list);
			count++;
		}
	}

	if (!count) {
		pr_err("Failed to init port, removing it\n");
		ipoib_remove_one(device, dev_list);
		return;
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
	struct list_head *dev_list = client_data;

	if (!dev_list)
		return;

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		struct rdma_netdev *parent_rn = netdev_priv(priv->dev);

		ib_unregister_event_handler(&priv->event_handler);
		flush_workqueue(ipoib_workqueue);

		/* mark interface in the middle of destruction */
		set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		/* Stop GC */
		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
		cancel_delayed_work(&priv->neigh_reap_task);
		flush_workqueue(priv->wq);

		/* Wrap rtnl_lock/unlock with mutex to protect sysfs calls */
		mutex_lock(&priv->sysfs_mutex);
		unregister_netdev(priv->dev);
		mutex_unlock(&priv->sysfs_mutex);

		parent_rn->free_rdma_netdev(priv->dev);

		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
			struct rdma_netdev *child_rn;

			child_rn = netdev_priv(cpriv->dev);
			child_rn->free_rdma_netdev(cpriv->dev);
			kfree(cpriv);
		}

		kfree(priv);
	}

	kfree(dev_list);
}

2385 static struct notifier_block ipoib_netdev_notifier
= {
2386 .notifier_call
= ipoib_netdev_event
,
static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
	ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create a global workqueue here that is used for all flush
	 * operations.  However, if you attempt to flush a workqueue
	 * from a task on that same workqueue, it deadlocks the system.
	 * We want to be able to flush the tasks associated with a
	 * specific net device, so we also create a workqueue for each
	 * netdevice.  We queue up the tasks for that device only on
	 * its private workqueue, and we only queue up flush events
	 * on our global flush workqueue.  This avoids the deadlocks.
	 */
	ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush",
						  WQ_MEM_RECLAIM);
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	ret = ipoib_netlink_init();
	if (ret)
		goto err_client;

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
	register_netdevice_notifier(&ipoib_netdev_notifier);
#endif
	return 0;

err_client:
	ib_unregister_client(&ipoib_client);

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
	unregister_netdevice_notifier(&ipoib_netdev_notifier);
#endif
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);