/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_cm.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include "ipoib.h"
int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif
#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

#define IPOIB_CM_RX_RESERVE     (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
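/*
 * IPOIB_CM_RX_UPDATE_TIME/TIMEOUT/DELAY drive the aging of passive
 * (receive-side) connections: ipoib_cm_handle_rx_wc() refreshes
 * p->jiffies and keeps the connection at the head of the passive_ids
 * LRU list, while ipoib_cm_stale_task() moves entries idle for longer
 * than IPOIB_CM_RX_TIMEOUT to the error list.  The
 * IPOIB_CM_RX_UPDATE_MASK check means only roughly one in four
 * completions (wr_id with the low two bits clear) pays for that refresh.
 */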
static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
};
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);
static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
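/*
 * Receive WR identifiers encode the ring index together with the
 * IPOIB_OP_CM and IPOIB_OP_RECV flag bits, so the completion handler can
 * tell CM receives apart from datagram and send completions and recover
 * the buffer index by masking the flag bits back off.
 */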
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;

	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
					struct ipoib_cm_rx *rx,
					struct ib_recv_wr *wr,
					struct ib_sge *sge, int id)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;

	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		sge[i].addr = rx->rx_ring[id].mapping[i];

	ret = ib_post_recv(rx->qp, wr, &bad_wr);
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      rx->rx_ring[id].mapping);
		dev_kfree_skb_any(rx->rx_ring[id].skb);
		rx->rx_ring[id].skb = NULL;
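/*
 * CM receive buffers are built as an skb with an IPOIB_CM_HEAD_SIZE linear
 * part (DMA-mapped with ib_dma_map_single) plus up to IPOIB_CM_RX_SG - 1
 * page fragments (ib_dma_map_page), with all mappings recorded in
 * mapping[] so they can be unmapped again later.
 */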
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
					     struct ipoib_cm_rx_buf *rx_ring,
					     u64 mapping[IPOIB_CM_RX_SG],

	struct ipoib_dev_priv *priv = netdev_priv(dev);

	skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));

	/*
	 * IPoIB adds an IPOIB_ENCAP_LEN byte header, this will align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, IPOIB_CM_RX_RESERVE);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(gfp);

		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))

	rx_ring[id].skb = skb;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
static void ipoib_cm_free_rx_ring(struct net_device *dev,
				  struct ipoib_cm_rx_buf *rx_ring)
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
			dev_kfree_skb_any(rx_ring[i].skb);
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))

	/*
	 * QPs on flush list are error state. This way, a "flush
	 * error" WC will be immediately generated for each WR we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
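/*
 * The QP event handler below only acts on IB_EVENT_QP_LAST_WQE_REACHED,
 * which the HCA raises once a receive QP in the error state has no more
 * WRs left to flush; the connection is then handed to the drain/reap
 * machinery above.
 */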
static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
	};

	if (!ipoib_cm_has_srq(dev)) {
		attr.cap.max_recv_wr = ipoib_recvq_size;
		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;

	return ib_create_qp(priv->pd, &attr);
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
static void ipoib_cm_init_rx_wr(struct net_device *dev,
				struct ib_recv_wr *wr,
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	for (i = 0; i < priv->cm.num_frags; ++i)
		sge[i].lkey = priv->pd->local_dma_lkey;

	sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < priv->cm.num_frags; ++i)
		sge[i].length = PAGE_SIZE;

	wr->num_sge = priv->cm.num_frags;
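/*
 * ipoib_cm_init_rx_wr() fills in a receive WR template and its scatter
 * list: one IPOIB_CM_HEAD_SIZE entry for the linear head followed by
 * PAGE_SIZE entries for the fragments, all using the PD's local DMA lkey.
 * The same helper serves the per-connection rings below (non-SRQ case)
 * and the shared rx_wr used with the SRQ in ipoib_cm_dev_init().
 */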
static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
				   struct ipoib_cm_rx *rx)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];

	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);

	t = kmalloc(sizeof *t, GFP_KERNEL);

	ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);

	++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
					   rx->rx_ring[i].mapping,
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);

		ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);

	rx->recv_count = ipoib_recvq_size;

	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_ring(dev, rx->rx_ring);
static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(dev);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(dev, p);
		ret = PTR_ERR(p->qp);

	psn = prandom_u32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);

	if (!ipoib_cm_has_srq(dev)) {
		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);

	spin_lock_irq(&priv->lock);
	queue_delayed_work(priv->wq,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to passive ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");

	ib_destroy_qp(p->qp);
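/*
 * Passive connection setup, as implemented above: on IB_CM_REQ_RECEIVED a
 * new ipoib_cm_rx is allocated, its RC QP is created and walked through
 * INIT/RTR/RTS with a random starting PSN, per-connection receive buffers
 * are posted when no SRQ is available, and finally a REP carrying our UD
 * QPN and IPOIB_CM_BUF_SIZE is sent back to the active side.
 */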
static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		ib_send_cm_drep(cm_id, NULL, 0);
	case IB_CM_REJ_RECEIVED:
		priv = netdev_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)

	/* put header into skb */
	size = min(length, hdr_space);

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			/* don't need this page */
			skb_fill_page_desc(toskb, i, skb_frag_page(frag),
			--skb_shinfo(skb)->nr_frags;

			size = min(length, (unsigned) PAGE_SIZE);

			skb_frag_size_set(frag, size);
			skb->data_len += size;
			skb->truesize += size;
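/*
 * Receive completion path: packets shorter than IPOIB_CM_COPYBREAK are
 * copied into a freshly allocated small skb so the large ring buffer can
 * be reposted untouched; larger packets instead swap their pages into a
 * newly allocated replacement buffer via skb_put_frags() above, avoiding
 * a copy of the payload.
 */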
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	u64 mapping[IPOIB_CM_RX_SG];
	struct sk_buff *small_skb;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(priv->wq, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(dev);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	skb = rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++dev->stats.rx_dropped;
		if (!--p->recv_count) {
			spin_lock_irqsave(&priv->lock, flags);
			list_move(&p->list, &priv->cm.rx_reap_list);
			spin_unlock_irqrestore(&priv->lock, flags);
			queue_work(priv->wq, &priv->cm.rx_reap_task);

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);

	if (wc->byte_len < IPOIB_CM_COPYBREAK) {
		int dlen = wc->byte_len;

		small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
			skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
						   dlen, DMA_FROM_DEVICE);
			skb_copy_from_linear_data(skb, small_skb->data, dlen);
			ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
						      dlen, DMA_FROM_DEVICE);
			skb_put(small_skb, dlen);

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
				       mapping, GFP_ATOMIC);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++dev->stats.rx_dropped;

	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_add_pseudo_hdr(skb);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

	if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
		ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
			   "for buf %d\n", wr_id);

	if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
		ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
			   "for buf %d\n", wr_id);
static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    struct ipoib_tx_buf *tx_req)
	struct ib_send_wr *bad_wr;

	ipoib_build_sge(priv, tx_req);

	priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;

	return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
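/*
 * The send path below mirrors the datagram path: the skb is recorded in
 * tx->tx_ring before post_send() so a fast completion cannot race with the
 * bookkeeping, tx_outstanding throttles the device queue, and the net
 * queue is stopped once a full ipoib_sendq_size worth of WRs is in flight.
 */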
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);

	if (skb_shinfo(skb)->nr_frags > usable_sge) {
		if (skb_linearize(skb) < 0) {
			ipoib_warn(priv, "skb could not be linearized\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
		/* Does skb_linearize return ok without reducing nr_frags? */
		if (skb_shinfo(skb)->nr_frags > usable_sge) {
			ipoib_warn(priv, "too many frags after skb linearize\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send(). That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];

	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);

	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(skb);
		netif_trans_update(dev);

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
		netif_stop_queue(dev);
		rc = ib_req_notify_cq(priv->send_cq,
				      IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
			ipoib_warn(priv, "request notify on send CQ failed\n");
			ipoib_send_comp_handler(priv->send_cq, dev);
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);

	tx_req = &tx->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv, tx_req);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);

		spin_lock_irqsave(&priv->lock, flags);
			ipoib_neigh_free(neigh);

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(priv->wq, &priv->cm.reap_task);

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock_irqrestore(&priv->lock, flags);

	netif_tx_unlock(dev);
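/*
 * ipoib_cm_dev_open() registers the passive side: it creates a CM ID with
 * ipoib_cm_rx_handler() and listens on the IPoIB-CM service ID, which is
 * the IETF prefix (IPOIB_CM_IETF_ID) with the interface's UD QPN in the
 * low bits.
 */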
int ipoib_cm_dev_open(struct net_device *dev)
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);

	ib_destroy_cm_id(priv->cm.id);
static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *rx, *n;

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(dev)) {
			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
void ipoib_cm_dev_stop(struct net_device *dev)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)

	ib_destroy_cm_id(priv->cm.id);

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);

	/* Wait for all RX to be drained */

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);

		spin_unlock_irq(&priv->lock);
		spin_lock_irq(&priv->lock);

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(dev);

	cancel_delayed_work(&priv->cm.stale_task);
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	while ((skb = __skb_dequeue(&p->neigh->queue)))
		__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.send_cq = priv->recv_cq,
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = ipoib_sendq_size,
		.cap.max_send_sge = 1,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.create_flags = IB_QP_CREATE_USE_GFP_NOIO
	};
	struct ib_qp *tx_qp;

	if (dev->features & NETIF_F_SG)
		attr.cap.max_send_sge =
			min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);

	tx_qp = ib_create_qp(priv->pd, &attr);
	if (PTR_ERR(tx_qp) == -EINVAL) {
		attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
		tx_qp = ib_create_qp(priv->pd, &attr);

	tx->max_send_sge = attr.cap.max_send_sge;
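/*
 * The REQ we send below advertises our datagram QPN and IPOIB_CM_BUF_SIZE
 * in the private data (struct ipoib_cm_data), so the remote side can size
 * its receive buffers; the CM service ID again combines IPOIB_CM_IETF_ID
 * with the remote UD QPN taken from the neighbour's hardware address.
 */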
static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     struct ib_sa_path_rec *pathrec)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path = pathrec;
	req.alternate_path = NULL;
	req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num = qp->qp_num;
	req.qp_type = qp->qp_type;
	req.private_data = &data;
	req.private_data_len = sizeof data;
	req.flow_control = 0;

	req.starting_psn = 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources = 4;
	req.remote_cm_response_timeout = 20;
	req.local_cm_response_timeout = 20;
	req.retry_count = 0; /* RFC draft warns against retries */
	req.rnr_retry_count = 0; /* RFC draft warns against retries */
	req.max_cm_retries = 15;
	req.srq = ipoib_cm_has_srq(dev);
	return ib_send_cm_req(id, &req);
static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);

	p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
			       GFP_NOIO, PAGE_KERNEL);

	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);

	ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
		  p->qp->qp_num, pathrec->dgid.raw, qpn);

	ib_destroy_cm_id(p->id);

	ib_destroy_qp(p->qp);
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_tx_buf *tx_req;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	ib_destroy_cm_id(p->id);

	/* Wait for all sends to complete */
	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends not completed\n",
				   p->tx_head - p->tx_tail);

	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(tx_req->skb);

		netif_tx_lock_bh(p->dev);
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    netif_queue_stopped(p->dev) &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			netif_wake_queue(p->dev);
		netif_tx_unlock_bh(p->dev);

	ib_destroy_qp(p->qp);
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
			ipoib_neigh_free(neigh);

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(priv->wq, &priv->cm.reap_task);

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);

	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(priv->wq, &priv->cm.start_task);
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	unsigned long flags;
	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		spin_lock_irqsave(&priv->lock, flags);
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(priv->wq, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
			  tx->neigh->daddr + 4);
		spin_unlock_irqrestore(&priv->lock, flags);

#define QPN_AND_OPTIONS_OFFSET 4
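/*
 * An IPoIB neighbour's hardware address starts with 4 bytes of QPN plus
 * flags, followed by the 16-byte GID, hence QPN_AND_OPTIONS_OFFSET when
 * looking up the path by GID below (and the "daddr + 4" used when logging
 * GIDs elsewhere in this file).
 */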
static void ipoib_cm_tx_start(struct work_struct *work)
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	struct ipoib_path *path;
	struct ib_sa_path_rec pathrec;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		qpn = IPOIB_QPN(neigh->daddr);
		/*
		 * As long as the search is with these 2 locks,
		 * path existence indicates its validity.
		 */
		path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
			pr_info("%s ignore not valid path %pI6\n",
				neigh->daddr + QPN_AND_OPTIONS_OFFSET);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		ret = ipoib_cm_tx_init(p, qpn, &pathrec);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
			ipoib_neigh_free(neigh);

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
static void ipoib_cm_tx_reap(struct work_struct *work)
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
	struct net_device *dev = priv->dev;
	struct ipoib_cm_tx *p;
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		ipoib_cm_tx_destroy(p);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
static void ipoib_cm_skb_reap(struct work_struct *work)
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;
	unsigned long flags;
	unsigned mtu = priv->mcast_mtu;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
		dev_kfree_skb_any(skb);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
		queue_work(priv->wq, &priv->cm.skb_task);
static void ipoib_cm_rx_reap(struct work_struct *work)
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task)->dev);
static void ipoib_cm_stale_task(struct work_struct *work)
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(priv->wq,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
static ssize_t show_mode(struct device *d, struct device_attribute *attr,
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");

	return sprintf(buf, "datagram\n");
static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))

	if (!rtnl_trylock())
		return restart_syscall();

	ret = ipoib_set_mode(dev, buf);

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
	return device_create_file(&dev->dev, &dev_attr_mode);
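/*
 * The "mode" attribute created above appears under the netdev's sysfs
 * directory; switching modes at runtime would look something like this
 * (interface name assumed):
 *
 *   echo connected > /sys/class/net/ib0/mode
 *   echo datagram  > /sys/class/net/ib0/mode
 */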
static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.srq_type = IB_SRQT_BASIC,
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
			       priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;

	priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
	if (!priv->cm.srq_ring) {
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
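/*
 * ipoib_cm_dev_init() sizes the connected-mode MTU from whatever the SRQ
 * can scatter into (max_srq_sge pages, minus a small reserve) and falls
 * back to IPOIB_CM_MTU with per-connection receive rings when no SRQ is
 * available; it also flags connected-mode support by setting
 * IPOIB_FLAGS_RC in the first byte of the hardware address.
 */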
int ipoib_cm_dev_init(struct net_device *dev)
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge);

	max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge);
	ipoib_cm_create_srq(dev, max_srq_sge);
	if (ipoib_cm_has_srq(dev)) {
		priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10;
		priv->cm.num_frags = max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
		priv->cm.num_frags = IPOIB_CM_RX_SG;

	ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);

	if (ipoib_cm_has_srq(dev)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
						   priv->cm.num_frags - 1,
						   priv->cm.srq_ring[i].mapping,
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(dev);

			if (ipoib_cm_post_receive_srq(dev, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(dev);

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
void ipoib_cm_dev_cleanup(struct net_device *dev)
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)

	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;