/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/sched/signal.h>

#include "ipoib.h"

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

#define IPOIB_CM_RX_RESERVE     (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)

static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.opcode = IB_WR_SEND,
};

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}

static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
					struct ipoib_cm_rx *rx,
					struct ib_recv_wr *wr,
					struct ib_sge *sge, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		sge[i].addr = rx->rx_ring[id].mapping[i];

	ret = ib_post_recv(rx->qp, wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      rx->rx_ring[id].mapping);
		dev_kfree_skb_any(rx->rx_ring[id].skb);
		rx->rx_ring[id].skb = NULL;
	}

	return ret;
}

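/*
 * Allocate a receive skb laid out as one IPOIB_CM_HEAD_SIZE linear
 * buffer plus "frags" full pages, and DMA-map each piece: mapping[0]
 * covers the linear head, mapping[i + 1] covers page i.  The gfp
 * argument lets callers use GFP_KERNEL at ring-setup time and
 * GFP_ATOMIC from the completion handler.
 */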
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
					     struct ipoib_cm_rx_buf *rx_ring,
					     int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG],
					     gfp_t gfp)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds an IPOIB_ENCAP_LEN byte header; this will align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, IPOIB_CM_RX_RESERVE);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(gfp);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	rx_ring[id].skb = skb;
	return skb;

partial_error:

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (; i > 0; --i)
		ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}

static void ipoib_cm_free_rx_ring(struct net_device *dev,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      rx_ring[i].mapping);
			dev_kfree_skb_any(rx_ring[i].skb);
		}

	vfree(rx_ring);
}

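/*
 * Draining works by exploiting the error state: every QP on the flush
 * list is already in IB_QPS_ERR, so the single drain send WR (wr_id
 * IPOIB_CM_RX_DRAIN_WRID) posted on the first of them completes with a
 * "flush error" on the shared CQ.  The idea is that by the time
 * ipoib_cm_handle_rx_wc() sees that completion, all receive completions
 * for the flushed QPs have been polled, and the whole drain list can be
 * moved to the reap list.
 */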
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in the CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on the flush list are in the error state; this way, a
	 * "flush error" WC is generated immediately for each WR we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1, /* FIXME: 0 seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(dev)) {
		attr.cap.max_recv_wr = ipoib_recvq_size;
		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
	}

	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first.  This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}

static void ipoib_cm_init_rx_wr(struct net_device *dev,
				struct ib_recv_wr *wr,
				struct ib_sge *sge)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i;

	for (i = 0; i < priv->cm.num_frags; ++i)
		sge[i].lkey = priv->pd->local_dma_lkey;

	sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < priv->cm.num_frags; ++i)
		sge[i].length = PAGE_SIZE;

	wr->next = NULL;
	wr->sg_list = sge;
	wr->num_sge = priv->cm.num_frags;
}

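/*
 * When the HCA offers no SRQ, each connection gets a private receive
 * ring instead.  The number of such per-connection QPs is capped by the
 * max_nonsrq_conn_qp module parameter; requests beyond the cap are
 * rejected with IB_CM_REJ_NO_QP.
 */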
static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
				   struct ipoib_cm_rx *rx)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct {
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];
	} *t;
	int ret;
	int i;

	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
	if (!rx->rx_ring)
		return -ENOMEM;

	t = kmalloc(sizeof *t, GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_free_1;
	}

	ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
					   rx->rx_ring[i].mapping,
					   GFP_KERNEL)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	kfree(t);

	return 0;

err_count:
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	kfree(t);

err_free_1:
	ipoib_cm_free_rx_ring(dev, rx->rx_ring);

	return ret;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(dev);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}

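/*
 * Passive-side connection setup: a REQ allocates an ipoib_cm_rx,
 * creates the RX QP, walks it through INIT/RTR/RTS, optionally sets up
 * a non-SRQ receive ring, puts the context on the passive_ids LRU list
 * (policed by ipoib_cm_stale_task()), and answers with a REP.
 */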
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = prandom_u32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(dev)) {
		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(priv->wq,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to passive ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = ipoib_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}
/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, skb_frag_page(frag),
					   0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			skb_frag_size_set(frag, size);
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}

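/*
 * Receive completion: packets shorter than IPOIB_CM_COPYBREAK are
 * copied into a freshly allocated small skb so the large mapped buffer
 * can stay in the ring; larger packets instead swap the ring slot for a
 * newly mapped replacement (allocated with GFP_ATOMIC) and hand the
 * filled buffer up the stack.  Either way the slot is reposted at
 * "repost:".
 */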
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;
	int has_srq;
	struct sk_buff *small_skb;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(priv->wq, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(dev);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	skb = rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++dev->stats.rx_dropped;
		if (has_srq)
			goto repost;
		else {
			if (!--p->recv_count) {
				spin_lock_irqsave(&priv->lock, flags);
				list_move(&p->list, &priv->cm.rx_reap_list);
				spin_unlock_irqrestore(&priv->lock, flags);
				queue_work(priv->wq, &priv->cm.rx_reap_task);
			}
			return;
		}
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	if (wc->byte_len < IPOIB_CM_COPYBREAK) {
		int dlen = wc->byte_len;

		small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
		if (small_skb) {
			skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
						   dlen, DMA_FROM_DEVICE);
			skb_copy_from_linear_data(skb, small_skb->data, dlen);
			ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
						      dlen, DMA_FROM_DEVICE);
			skb_put(small_skb, dlen);
			skb = small_skb;
			goto copied;
		}
	}

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
				       mapping, GFP_ATOMIC);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

copied:
	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_add_pseudo_hdr(skb);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
							  &priv->cm.rx_wr,
							  priv->cm.rx_sge,
							  wr_id))) {
			--p->recv_count;
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
		}
	}
}

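/*
 * Tag the wr_id with IPOIB_OP_CM so the completion handlers can tell
 * connected-mode completions apart from UD ones sharing the same CQs.
 */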
static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    struct ipoib_tx_buf *tx_req)
{
	struct ib_send_wr *bad_wr;

	ipoib_build_sge(priv, tx_req);

	priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;

	return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
}

void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int rc;
	unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}
	if (skb_shinfo(skb)->nr_frags > usable_sge) {
		if (skb_linearize(skb) < 0) {
			ipoib_warn(priv, "skb could not be linearized\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
		/* Does skb_linearize return ok without reducing nr_frags? */
		if (skb_shinfo(skb)->nr_frags > usable_sge) {
			ipoib_warn(priv, "too many frags after skb linearize\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	}
	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;

	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	skb_orphan(skb);
	skb_dst_drop(skb);

	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(skb);
	} else {
		netif_trans_update(dev);
		++tx->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
			rc = ib_req_notify_cq(priv->send_cq,
					      IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
			if (rc < 0)
				ipoib_warn(priv, "request notify on send CQ failed\n");
			else if (rc)
				ipoib_send_comp_handler(priv->send_cq, dev);
		}
	}
}

void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ipoib_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv, tx_req);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	netif_tx_lock(dev);

	++tx->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		if (wc->status != IB_WC_RNR_RETRY_EXC_ERR)
			ipoib_warn(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		else
			ipoib_dbg(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n",
				  wc->status, wr_id, wc->vendor_err);

		spin_lock_irqsave(&priv->lock, flags);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			ipoib_neigh_free(neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(priv->wq, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock_irqrestore(&priv->lock, flags);
	}

	netif_tx_unlock(dev);
}

int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
err_cm:
	priv->cm.id = NULL;
	return ret;
}

static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_rx *rx, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(dev)) {
			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
		}
		kfree(rx);
	}
}

void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		ipoib_drain_cq(dev);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(dev);

	cancel_delayed_work(&priv->cm.stale_task);
}

static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		ret = dev_queue_xmit(skb);
		if (ret)
			ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
				   __func__, ret);
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}

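/*
 * The TX QP is first requested with IB_QP_CREATE_USE_GFP_NOIO so that
 * connection setup can proceed without recursing into I/O; providers
 * that reject the flag with -EINVAL get a second, plain creation
 * attempt.
 */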
static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_init_attr attr = {
		.send_cq = priv->recv_cq,
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = ipoib_sendq_size,
		.cap.max_send_sge = 1,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = tx,
		.create_flags = IB_QP_CREATE_USE_GFP_NOIO
	};

	struct ib_qp *tx_qp;

	if (dev->features & NETIF_F_SG)
		attr.cap.max_send_sge =
			min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);

	tx_qp = ib_create_qp(priv->pd, &attr);
	if (PTR_ERR(tx_qp) == -EINVAL) {
		attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
		tx_qp = ib_create_qp(priv->pd, &attr);
	}
	tx->max_send_sge = attr.cap.max_send_sge;
	return tx_qp;
}

static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path		= pathrec;
	req.alternate_path		= NULL;
	req.service_id			= cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num			= qp->qp_num;
	req.qp_type			= qp->qp_type;
	req.private_data		= &data;
	req.private_data_len		= sizeof data;
	req.flow_control		= 0;

	req.starting_psn		= 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources		= 4;
	req.remote_cm_response_timeout	= 20;
	req.local_cm_response_timeout	= 20;
	req.retry_count			= 0; /* RFC draft warns against retries */
	req.rnr_retry_count		= 0; /* RFC draft warns against retries */
	req.max_cm_retries		= 15;
	req.srq				= ipoib_cm_has_srq(dev);
	return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
	int ret;

	p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
			       GFP_NOIO, PAGE_KERNEL);
	if (!p->tx_ring) {
		ret = -ENOMEM;
		goto err_tx;
	}
	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
		goto err_modify_send;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_modify_send;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
		  p->qp->qp_num, pathrec->dgid.raw, qpn);

	return 0;

err_modify_send:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
	vfree(p->tx_ring);
err_tx:
	return ret;
}

static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
	struct ipoib_tx_buf *tx_req;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			msleep(1);
		}
	}

timeout:

	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(tx_req->skb);
		++p->tx_tail;
		netif_tx_lock_bh(p->dev);
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    netif_queue_stopped(p->dev) &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			netif_wake_queue(p->dev);
		netif_tx_unlock_bh(p->dev);
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	vfree(p->tx_ring);
	kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	unsigned long flags;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			ipoib_neigh_free(neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(priv->wq, &priv->cm.reap_task);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		break;
	default:
		break;
	}

	return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(priv->wq, &priv->cm.start_task);
	return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
	unsigned long flags;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		spin_lock_irqsave(&priv->lock, flags);
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(priv->wq, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
			  tx->neigh->daddr + 4);
		tx->neigh = NULL;
		spin_unlock_irqrestore(&priv->lock, flags);
	}
}

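/*
 * Lock ordering throughout the TX work handlers below is netif_tx_lock
 * first, then priv->lock; both are dropped around the blocking
 * ipoib_cm_tx_init() / ipoib_cm_tx_destroy() calls and re-taken
 * afterwards.
 */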
#define QPN_AND_OPTIONS_OFFSET 4

static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	struct ipoib_path *path;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;

		qpn = IPOIB_QPN(neigh->daddr);
		/*
		 * As long as the search is done under these two locks,
		 * the path's existence indicates that it is still valid.
		 */
		path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
		if (!path) {
			pr_info("%s ignore not valid path %pI6\n",
				__func__,
				neigh->daddr + QPN_AND_OPTIONS_OFFSET);
			goto free_neigh;
		}
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		ret = ipoib_cm_tx_init(p, qpn, &pathrec);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);

		if (ret) {
free_neigh:
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				ipoib_neigh_free(neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct net_device *dev = priv->dev;
	struct ipoib_cm_tx *p;
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del_init(&p->list);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		ipoib_cm_tx_destroy(p);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;
	unsigned long flags;
	unsigned mtu = priv->mcast_mtu;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
		dev_kfree_skb_any(skb);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

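/*
 * Called for packets that exceed the connection MTU: the skb is queued
 * and ipoib_cm_skb_reap() later emits the ICMP FRAG_NEEDED (IPv4) or
 * PKT_TOOBIG (IPv6) error from workqueue context rather than in the
 * transmit path.
 */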
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(priv->wq, &priv->cm.skb_task);
}

static void ipoib_cm_rx_reap(struct work_struct *work)
{
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task)->dev);
}

static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* The list is sorted by LRU; start from the tail and
		 * stop when we see a recently used entry. */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(priv->wq,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}

static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	int ret;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
		return -EPERM;

	if (!rtnl_trylock())
		return restart_syscall();

	ret = ipoib_set_mode(dev, buf);

	/*
	 * ipoib_set_mode() is assumed to return with the rtnl still
	 * held unless it returned -EBUSY; in that case there is no
	 * need to call rtnl_unlock().
	 */
	if (ret != -EBUSY)
		rtnl_unlock();

	return (!ret || ret == -EBUSY) ? count : ret;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}

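/*
 * Try to create the shared receive queue.  This is allowed to fail:
 * -ENOSYS (no SRQ support on this device) silently leaves priv->cm.srq
 * NULL, and the driver then falls back to per-connection receive rings
 * (see ipoib_cm_nonsrq_init_rx()).
 */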
static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.srq_type = IB_SRQT_BASIC,
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
			       priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;
		return;
	}

	priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
	if (!priv->cm.srq_ring) {
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
		return;
	}
}

int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int max_srq_sge, i;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge);

	max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge);
	ipoib_cm_create_srq(dev, max_srq_sge);
	if (ipoib_cm_has_srq(dev)) {
		priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10;
		priv->cm.num_frags = max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
		priv->cm.num_frags = IPOIB_CM_RX_SG;
	}

	ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);

	if (ipoib_cm_has_srq(dev)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
						   priv->cm.num_frags - 1,
						   priv->cm.srq_ring[i].mapping,
						   GFP_KERNEL)) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(dev, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -EIO;
			}
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}