/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
#include <rdma/ib_addr.h>

#include "rxe.h"
#include "rxe_net.h"
#include "rxe_loc.h"
static LIST_HEAD(rxe_dev_list);
static spinlock_t dev_list_lock; /* spinlock for device list */
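
/* Look up the rxe device bound to a given net_device, if any.
 * Walks rxe_dev_list under dev_list_lock; bottom halves are disabled
 * because the list is also consulted from packet-receive (softirq)
 * context.
 */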
struct rxe_dev *net_to_rxe(struct net_device *ndev)
{
	struct rxe_dev *rxe;
	struct rxe_dev *found = NULL;

	spin_lock_bh(&dev_list_lock);
	list_for_each_entry(rxe, &rxe_dev_list, list) {
		if (rxe->ndev == ndev) {
			found = rxe;
			break;
		}
	}
	spin_unlock_bh(&dev_list_lock);

	return found;
}
struct rxe_dev *get_rxe_by_name(const char *name)
{
	struct rxe_dev *rxe;
	struct rxe_dev *found = NULL;

	spin_lock_bh(&dev_list_lock);
	list_for_each_entry(rxe, &rxe_dev_list, list) {
		if (!strcmp(name, rxe->ib_dev.name)) {
			found = rxe;
			break;
		}
	}
	spin_unlock_bh(&dev_list_lock);
	return found;
}
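
/* UDP encapsulation sockets for RoCEv2, shared by all rxe devices */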
struct rxe_recv_sockets recv_sockets;
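
/* Derive an EUI-64 from the netdev MAC address: flip the
 * universal/local bit of the first octet and splice 0xff, 0xfe into
 * the middle of the 48-bit MAC (RFC 4291, appendix A).
 */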
static __be64 rxe_mac_to_eui64(struct net_device *ndev)
{
	unsigned char *mac_addr = ndev->dev_addr;
	__be64 eui64;
	unsigned char *dst = (unsigned char *)&eui64;

	dst[0] = mac_addr[0] ^ 2;
	dst[1] = mac_addr[1];
	dst[2] = mac_addr[2];
	dst[3] = 0xff;
	dst[4] = 0xfe;
	dst[5] = mac_addr[3];
	dst[6] = mac_addr[4];
	dst[7] = mac_addr[5];

	return eui64;
}
static __be64 node_guid(struct rxe_dev *rxe)
{
	return rxe_mac_to_eui64(rxe->ndev);
}
static __be64 port_guid(struct rxe_dev *rxe)
{
	return rxe_mac_to_eui64(rxe->ndev);
}
static struct device *dma_device(struct rxe_dev *rxe)
{
	struct net_device *ndev;

	ndev = rxe->ndev;

	/* a VLAN device has no DMA-capable parent; use the real device */
	if (ndev->priv_flags & IFF_802_1Q_VLAN)
		ndev = vlan_dev_real_dev(ndev);

	return ndev->dev.parent;
}
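
/* Multicast GIDs are realized by mapping the GID to an Ethernet
 * multicast address and programming it into the underlying netdev's
 * filter.
 */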
static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_add(rxe->ndev, ll_addr);

	return err;
}
static int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_del(rxe->ndev, ll_addr);

	return err;
}
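
/* Resolve an IPv4 route for the outer UDP/IP header. Returns the
 * dst_entry on success or NULL if no route exists.
 */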
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
					 struct in_addr *saddr,
					 struct in_addr *daddr)
{
	struct rtable *rt;
	struct flowi4 fl = { { 0 } };

	memset(&fl, 0, sizeof(fl));
	fl.flowi4_oif = ndev->ifindex;
	memcpy(&fl.saddr, saddr, sizeof(*saddr));
	memcpy(&fl.daddr, daddr, sizeof(*daddr));
	fl.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_key(&init_net, &fl);
	if (IS_ERR(rt)) {
		pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
		return NULL;
	}

	return &rt->dst;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	struct dst_entry *ndst;
	struct flowi6 fl6 = { { 0 } };

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = ndev->ifindex;
	memcpy(&fl6.saddr, saddr, sizeof(*saddr));
	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
	fl6.flowi6_proto = IPPROTO_UDP;

	if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
						recv_sockets.sk6->sk, &ndst,
						&fl6))) {
		pr_err_ratelimited("no route to %pI6\n", daddr);
		goto put;
	}

	if (unlikely(ndst->error)) {
		pr_err("no route to %pI6\n", daddr);
		goto put;
	}

	return ndst;
put:
	dst_release(ndst);
	return NULL;
}

#else

static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	return NULL;
}

#endif
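
/* Receive callback attached to the UDP tunnel socket: every RoCEv2
 * packet arriving on the well-known UDP port lands here, is matched
 * to its rxe device, and handed on to rxe_rcv().
 */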
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct udphdr *udph;
	struct net_device *ndev = skb->dev;
	struct rxe_dev *rxe = net_to_rxe(ndev);
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	if (!rxe)
		goto drop;

	if (skb_linearize(skb)) {
		pr_err("skb_linearize failed\n");
		goto drop;
	}

	udph = udp_hdr(skb);
	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = (u8 *)(udph + 1);
	pkt->mask = RXE_GRH_MASK;
	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

	return rxe_rcv(skb);
drop:
	kfree_skb(skb);
	return 0;
}
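
/* Create a kernel UDP socket on the given port and register it as a
 * tunnel socket so the stack delivers payloads to rxe_udp_encap_recv.
 */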
static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
					   bool ipv6)
{
	int err;
	struct socket *sock;
	struct udp_port_cfg udp_cfg;
	struct udp_tunnel_sock_cfg tnl_cfg;

	memset(&udp_cfg, 0, sizeof(udp_cfg));

	if (ipv6) {
		udp_cfg.family = AF_INET6;
		udp_cfg.ipv6_v6only = 1;
	} else {
		udp_cfg.family = AF_INET;
	}

	udp_cfg.local_udp_port = port;

	/* Create UDP socket */
	err = udp_sock_create(net, &udp_cfg, &sock);
	if (err < 0) {
		pr_err("failed to create udp socket. err = %d\n", err);
		return ERR_PTR(err);
	}

	/* zero the whole config so no callback field is left stale */
	memset(&tnl_cfg, 0, sizeof(tnl_cfg));
	tnl_cfg.sk_user_data = NULL;
	tnl_cfg.encap_type = 1;
	tnl_cfg.encap_rcv = rxe_udp_encap_recv;
	tnl_cfg.encap_destroy = NULL;

	/* Setup UDP tunnel */
	setup_udp_tunnel_sock(net, sock, &tnl_cfg);

	return sock;
}
void rxe_release_udp_tunnel(struct socket *sk)
{
	if (sk)
		udp_tunnel_sock_release(sk);
}
static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
			    __be16 dst_port)
{
	struct udphdr *udph;

	__skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);

	udph->dest = dst_port;
	udph->source = src_port;
	udph->len = htons(skb->len);
	udph->check = 0;
}
static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     __be32 saddr, __be32 daddr, __u8 proto,
			     __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	struct iphdr *iph;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, dst);
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version = IPVERSION;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos = tos;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->ttl = ttl;
	__ip_select_ident(dev_net(dst->dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
}
static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     struct in6_addr *saddr, struct in6_addr *daddr,
			     __u8 proto, __u8 prio, __u8 ttl)
{
	struct ipv6hdr *ip6h;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
			    | IPSKB_REROUTED);
	skb_dst_set(skb, dst);

	__skb_push(skb, sizeof(*ip6h));
	skb_reset_network_header(skb);
	ip6h = ipv6_hdr(skb);
	ip6_flow_hdr(ip6h, prio, htonl(0));
	ip6h->nexthdr = proto;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;
	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
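
/* Build the outer UDP and IPv4 headers for an outgoing RoCEv2 packet
 * described by the address vector; packets whose source and
 * destination addresses match are flagged for the loopback path
 * instead of the wire.
 */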
static int prepare4(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
{
	struct dst_entry *dst;
	bool xnet = false;
	__be16 df = htons(IP_DF);
	struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
	struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	dst = rxe_find_route4(rxe->ndev, saddr, daddr);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	if (!memcmp(saddr, daddr, sizeof(*daddr)))
		pkt->mask |= RXE_LOOPBACK_MASK;

	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
			htons(ROCE_V2_UDP_DPORT));

	prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
			 av->grh.traffic_class, av->grh.hop_limit, df, xnet);

	return 0;
}
static int prepare6(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
{
	struct dst_entry *dst;
	struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
	struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	dst = rxe_find_route6(rxe->ndev, saddr, daddr);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	if (!memcmp(saddr, daddr, sizeof(*daddr)))
		pkt->mask |= RXE_LOOPBACK_MASK;

	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
			htons(ROCE_V2_UDP_DPORT));

	prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
			 av->grh.traffic_class, av->grh.hop_limit);

	return 0;
}
static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		   struct sk_buff *skb, u32 *crc)
{
	int err = 0;
	struct rxe_av *av = rxe_get_av(pkt);

	if (av->network_type == RDMA_NETWORK_IPV4)
		err = prepare4(rxe, skb, av);
	else if (av->network_type == RDMA_NETWORK_IPV6)
		err = prepare6(rxe, skb, av);

	*crc = rxe_icrc_hdr(pkt, skb);

	return err;
}
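
/* skb destructor for transmitted packets: credits the per-QP count of
 * in-flight skbs and kicks the requester task if it was throttled
 * waiting for that count to drop.
 */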
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rxe_qp *qp = sk->sk_user_data;
	int skb_out = atomic_dec_return(&qp->skb_out);

	if (unlikely(qp->need_req_skb &&
		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
		rxe_run_task(&qp->req.task, 1);
}
static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct rxe_av *av;
	int err;

	av = rxe_get_av(pkt);

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	nskb->destructor = rxe_skb_tx_dtor;
	nskb->sk = pkt->qp->sk->sk;

	if (av->network_type == RDMA_NETWORK_IPV4) {
		err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
	} else if (av->network_type == RDMA_NETWORK_IPV6) {
		err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk,
				    nskb);
	} else {
		pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
		kfree_skb(nskb);
		return -EINVAL;
	}

	if (unlikely(net_xmit_eval(err))) {
		pr_debug("error sending packet: %d\n", err);
		return -EAGAIN;
	}

	kfree_skb(skb);

	return 0;
}
static int loopback(struct sk_buff *skb)
{
	return rxe_rcv(skb);
}
static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
{
	return rxe->port.port_guid == av->grh.dgid.global.interface_id;
}
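
/* Allocate and lay out an skb for an outgoing packet: reserve room in
 * front for the link, IP, and UDP headers, then append paylen bytes
 * for the RoCE headers and payload.
 */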
static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				   int paylen, struct rxe_pkt_info *pkt)
{
	unsigned int hdr_len;
	struct sk_buff *skb;

	if (av->network_type == RDMA_NETWORK_IPV4)
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct iphdr);
	else
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct ipv6hdr);

	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(rxe->ndev),
			GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));

	skb->dev = rxe->ndev;
	if (av->network_type == RDMA_NETWORK_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = skb_put(skb, paylen);
	pkt->mask |= RXE_GRH_MASK;

	memset(pkt->hdr, 0, paylen);

	return skb;
}
/*
 * this is required by rxe_cfg to match rxe devices in
 * /sys/class/infiniband up with their underlying ethernet devices
 */
static char *parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
	return rxe->ndev->name;
}
static enum rdma_link_layer link_layer(struct rxe_dev *rxe,
				       unsigned int port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
static struct rxe_ifc_ops ifc_ops = {
	.node_guid	= node_guid,
	.port_guid	= port_guid,
	.dma_device	= dma_device,
	.mcast_add	= mcast_add,
	.mcast_delete	= mcast_delete,
	.prepare	= prepare,
	.send		= send,
	.loopback	= loopback,
	.init_packet	= init_packet,
	.parent_name	= parent_name,
	.link_layer	= link_layer,
};
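
/* Create a new rxe device on top of a network device and add it to
 * the global device list. Returns NULL on any failure.
 */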
struct rxe_dev *rxe_net_add(struct net_device *ndev)
{
	int err;
	struct rxe_dev *rxe = NULL;

	rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe));
	if (!rxe)
		return NULL;

	rxe->ifc_ops = &ifc_ops;
	rxe->ndev = ndev;

	err = rxe_add(rxe, ndev->mtu);
	if (err) {
		ib_dealloc_device(&rxe->ib_dev);
		return NULL;
	}

	spin_lock_bh(&dev_list_lock);
	list_add_tail(&rxe->list, &rxe_dev_list);
	spin_unlock_bh(&dev_list_lock);
	return rxe;
}
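
/* Tear down every registered rxe device. The lock is dropped around
 * rxe_remove() because removal can sleep; the entry is unlinked first
 * so no other path can find it.
 */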
void rxe_remove_all(void)
{
	spin_lock_bh(&dev_list_lock);
	while (!list_empty(&rxe_dev_list)) {
		struct rxe_dev *rxe =
			list_first_entry(&rxe_dev_list, struct rxe_dev, list);

		list_del(&rxe->list);
		spin_unlock_bh(&dev_list_lock);
		rxe_remove(rxe);
		spin_lock_bh(&dev_list_lock);
	}
	spin_unlock_bh(&dev_list_lock);
}
EXPORT_SYMBOL(rxe_remove_all);
static void rxe_port_event(struct rxe_dev *rxe,
			   enum ib_event_type event)
{
	struct ib_event ev;

	ev.device = &rxe->ib_dev;
	ev.element.port_num = 1;
	ev.event = event;

	ib_dispatch_event(&ev);
}
/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_ACTIVE;
	port->attr.phys_state = IB_PHYS_STATE_LINK_UP;

	rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
	pr_info("rxe: set %s active\n", rxe->ib_dev.name);
}
/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_DOWN;
	port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;

	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
	pr_info("rxe: set %s down\n", rxe->ib_dev.name);
}
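
/* React to state changes on the underlying net_device: tear the rxe
 * device down on unregister, propagate link and MTU changes, and log
 * everything else as ignored.
 */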
static int rxe_notify(struct notifier_block *not_blk,
		      unsigned long event,
		      void *arg)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(arg);
	struct rxe_dev *rxe = net_to_rxe(ndev);

	if (!rxe)
		goto out;

	switch (event) {
	case NETDEV_UNREGISTER:
		list_del(&rxe->list);
		rxe_remove(rxe);
		break;
	case NETDEV_UP:
		rxe_port_up(rxe);
		break;
	case NETDEV_DOWN:
		rxe_port_down(rxe);
		break;
	case NETDEV_CHANGEMTU:
		pr_info("rxe: %s changed mtu to %d\n", ndev->name, ndev->mtu);
		rxe_set_mtu(rxe, ndev->mtu);
		break;
	case NETDEV_REBOOT:
	case NETDEV_CHANGE:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	default:
		pr_info("rxe: ignoring netdev event = %ld for %s\n",
			event, ndev->name);
		break;
	}
out:
	return NOTIFY_OK;
}
struct notifier_block rxe_net_notifier = {
	.notifier_call = rxe_notify,
};
int rxe_net_ipv4_init(void)
{
	spin_lock_init(&dev_list_lock);

	recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT),
						false);
	if (IS_ERR(recv_sockets.sk4)) {
		recv_sockets.sk4 = NULL;
		pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
		return -1;
	}

	return 0;
}
int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	spin_lock_init(&dev_list_lock);

	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT),
						true);
	if (IS_ERR(recv_sockets.sk6)) {
		recv_sockets.sk6 = NULL;
		pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
		return -1;
	}
#endif
	return 0;
}
void rxe_net_exit(void)
{
	rxe_release_udp_tunnel(recv_sockets.sk6);
	rxe_release_udp_tunnel(recv_sockets.sk4);
	unregister_netdevice_notifier(&rxe_net_notifier);
}