drivers/infiniband/ulp/ipoib/ipoib_cm.c
1 /*
2 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <rdma/ib_cm.h>
34 #include <net/dst.h>
35 #include <net/icmp.h>
36 #include <linux/icmpv6.h>
37 #include <linux/delay.h>
38 #include <linux/slab.h>
39 #include <linux/vmalloc.h>
40 #include <linux/moduleparam.h>
41 #include <linux/sched/signal.h>
42 #include <linux/sched/mm.h>
43
44 #include "ipoib.h"
45
46 int ipoib_max_conn_qp = 128;
47
48 module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
49 MODULE_PARM_DESC(max_nonsrq_conn_qp,
50 "Max number of connected-mode QPs per interface "
51 "(applied only if shared receive queue is not available)");
52
53 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
54 static int data_debug_level;
55
56 module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
57 MODULE_PARM_DESC(cm_data_debug_level,
58 "Enable data path debug tracing for connected mode if > 0");
59 #endif
60
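/*
 * IPOIB_CM_IETF_ID is the service ID prefix used for connected-mode CM
 * listen/connect; it is OR'ed with the local UD QP number to form the
 * full service ID.  The RX timing constants drive the stale-connection
 * logic: passive connections idle longer than IPOIB_CM_RX_TIMEOUT are
 * moved to the error state by the stale task, which re-arms itself
 * every IPOIB_CM_RX_DELAY.  IPOIB_CM_RX_UPDATE_MASK limits how often
 * the per-connection timestamp is refreshed on the RX fast path.
 */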
61 #define IPOIB_CM_IETF_ID 0x1000000000000000ULL
62
63 #define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
64 #define IPOIB_CM_RX_TIMEOUT (2 * 256 * HZ)
65 #define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
66 #define IPOIB_CM_RX_UPDATE_MASK (0x3)
67
68 #define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
69
70 static struct ib_qp_attr ipoib_cm_err_attr = {
71 .qp_state = IB_QPS_ERR
72 };
73
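/*
 * Dummy send WR used to drain receive QPs: the QPs on the flush list
 * are already in the error state, so posting this WR generates a
 * flush-error completion on the receive CQ.  Since CQ entries are
 * delivered in order, seeing this completion means the flush
 * completions for the outstanding receives have already been reaped.
 * The reserved WR ID lets ipoib_cm_handle_rx_wc() recognize it.
 */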
74 #define IPOIB_CM_RX_DRAIN_WRID 0xffffffff
75
76 static struct ib_send_wr ipoib_cm_rx_drain_wr = {
77 .opcode = IB_WR_SEND,
78 };
79
80 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
81 struct ib_cm_event *event);
82
83 static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
84 u64 mapping[IPOIB_CM_RX_SG])
85 {
86 int i;
87
88 ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
89
90 for (i = 0; i < frags; ++i)
91 ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
92 }
93
94 static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
95 {
96 struct ipoib_dev_priv *priv = ipoib_priv(dev);
97 struct ib_recv_wr *bad_wr;
98 int i, ret;
99
100 priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
101
102 for (i = 0; i < priv->cm.num_frags; ++i)
103 priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
104
105 ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
106 if (unlikely(ret)) {
107 ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
108 ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
109 priv->cm.srq_ring[id].mapping);
110 dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
111 priv->cm.srq_ring[id].skb = NULL;
112 }
113
114 return ret;
115 }
116
117 static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
118 struct ipoib_cm_rx *rx,
119 struct ib_recv_wr *wr,
120 struct ib_sge *sge, int id)
121 {
122 struct ipoib_dev_priv *priv = ipoib_priv(dev);
123 struct ib_recv_wr *bad_wr;
124 int i, ret;
125
126 wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
127
128 for (i = 0; i < IPOIB_CM_RX_SG; ++i)
129 sge[i].addr = rx->rx_ring[id].mapping[i];
130
131 ret = ib_post_recv(rx->qp, wr, &bad_wr);
132 if (unlikely(ret)) {
133 ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
134 ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
135 rx->rx_ring[id].mapping);
136 dev_kfree_skb_any(rx->rx_ring[id].skb);
137 rx->rx_ring[id].skb = NULL;
138 }
139
140 return ret;
141 }
142
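/*
 * Allocate a connected-mode receive buffer: a linear head of
 * IPOIB_CM_HEAD_SIZE bytes plus 'frags' full pages attached as skb
 * fragments.  Every piece is DMA-mapped and the mappings are recorded
 * in mapping[]; on any failure everything allocated so far is unwound
 * and NULL is returned.
 */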
143 static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
144 struct ipoib_cm_rx_buf *rx_ring,
145 int id, int frags,
146 u64 mapping[IPOIB_CM_RX_SG],
147 gfp_t gfp)
148 {
149 struct ipoib_dev_priv *priv = ipoib_priv(dev);
150 struct sk_buff *skb;
151 int i;
152
153 skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
154 if (unlikely(!skb))
155 return NULL;
156
157 /*
158 * IPoIB adds an IPOIB_ENCAP_LEN byte header; this aligns the
159 * IP header to a multiple of 16.
160 */
161 skb_reserve(skb, IPOIB_CM_RX_RESERVE);
162
163 mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
164 DMA_FROM_DEVICE);
165 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
166 dev_kfree_skb_any(skb);
167 return NULL;
168 }
169
170 for (i = 0; i < frags; i++) {
171 struct page *page = alloc_page(gfp);
172
173 if (!page)
174 goto partial_error;
175 skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
176
177 mapping[i + 1] = ib_dma_map_page(priv->ca, page,
178 0, PAGE_SIZE, DMA_FROM_DEVICE);
179 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
180 goto partial_error;
181 }
182
183 rx_ring[id].skb = skb;
184 return skb;
185
186 partial_error:
187
188 ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
189
190 for (; i > 0; --i)
191 ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
192
193 dev_kfree_skb_any(skb);
194 return NULL;
195 }
196
197 static void ipoib_cm_free_rx_ring(struct net_device *dev,
198 struct ipoib_cm_rx_buf *rx_ring)
199 {
200 struct ipoib_dev_priv *priv = ipoib_priv(dev);
201 int i;
202
203 for (i = 0; i < ipoib_recvq_size; ++i)
204 if (rx_ring[i].skb) {
205 ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
206 rx_ring[i].mapping);
207 dev_kfree_skb_any(rx_ring[i].skb);
208 }
209
210 vfree(rx_ring);
211 }
212
213 static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
214 {
215 struct ib_send_wr *bad_wr;
216 struct ipoib_cm_rx *p;
217
218 /* We only reserved 1 extra slot in the CQ for drain WRs, so
219 * make sure we have at most 1 outstanding WR. */
220 if (list_empty(&priv->cm.rx_flush_list) ||
221 !list_empty(&priv->cm.rx_drain_list))
222 return;
223
224 /*
225 * QPs on the flush list are in the error state. This way, a "flush
226 * error" WC will be generated immediately for each WR we post.
227 */
228 p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
229 ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
230 if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
231 ipoib_warn(priv, "failed to post drain wr\n");
232
233 list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
234 }
235
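/*
 * Async QP event handler for passive connections.  On
 * IB_EVENT_QP_LAST_WQE_REACHED the connection is moved to the flush
 * list and the drain machinery is kicked so its remaining receive
 * completions can be reaped.
 */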
236 static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
237 {
238 struct ipoib_cm_rx *p = ctx;
239 struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
240 unsigned long flags;
241
242 if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
243 return;
244
245 spin_lock_irqsave(&priv->lock, flags);
246 list_move(&p->list, &priv->cm.rx_flush_list);
247 p->state = IPOIB_CM_RX_FLUSH;
248 ipoib_cm_start_rx_drain(priv);
249 spin_unlock_irqrestore(&priv->lock, flags);
250 }
251
252 static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
253 struct ipoib_cm_rx *p)
254 {
255 struct ipoib_dev_priv *priv = ipoib_priv(dev);
256 struct ib_qp_init_attr attr = {
257 .event_handler = ipoib_cm_rx_event_handler,
258 .send_cq = priv->recv_cq, /* For drain WR */
259 .recv_cq = priv->recv_cq,
260 .srq = priv->cm.srq,
261 .cap.max_send_wr = 1, /* For drain WR */
262 .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
263 .sq_sig_type = IB_SIGNAL_ALL_WR,
264 .qp_type = IB_QPT_RC,
265 .qp_context = p,
266 };
267
268 if (!ipoib_cm_has_srq(dev)) {
269 attr.cap.max_recv_wr = ipoib_recvq_size;
270 attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
271 }
272
273 return ib_create_qp(priv->pd, &attr);
274 }
275
276 static int ipoib_cm_modify_rx_qp(struct net_device *dev,
277 struct ib_cm_id *cm_id, struct ib_qp *qp,
278 unsigned psn)
279 {
280 struct ipoib_dev_priv *priv = ipoib_priv(dev);
281 struct ib_qp_attr qp_attr;
282 int qp_attr_mask, ret;
283
284 qp_attr.qp_state = IB_QPS_INIT;
285 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
286 if (ret) {
287 ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
288 return ret;
289 }
290 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
291 if (ret) {
292 ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
293 return ret;
294 }
295 qp_attr.qp_state = IB_QPS_RTR;
296 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
297 if (ret) {
298 ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
299 return ret;
300 }
301 qp_attr.rq_psn = psn;
302 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
303 if (ret) {
304 ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
305 return ret;
306 }
307
308 /*
309 * Current Mellanox HCA firmware won't generate completions
310 * with error for drain WRs unless the QP has been moved to
311 * RTS first. This work-around leaves a window where a QP has
312 * moved to error asynchronously, but this will eventually get
313 * fixed in firmware, so let's not error out if modify QP
314 * fails.
315 */
316 qp_attr.qp_state = IB_QPS_RTS;
317 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
318 if (ret) {
319 ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
320 return 0;
321 }
322 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
323 if (ret) {
324 ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
325 return 0;
326 }
327
328 return 0;
329 }
330
331 static void ipoib_cm_init_rx_wr(struct net_device *dev,
332 struct ib_recv_wr *wr,
333 struct ib_sge *sge)
334 {
335 struct ipoib_dev_priv *priv = ipoib_priv(dev);
336 int i;
337
338 for (i = 0; i < priv->cm.num_frags; ++i)
339 sge[i].lkey = priv->pd->local_dma_lkey;
340
341 sge[0].length = IPOIB_CM_HEAD_SIZE;
342 for (i = 1; i < priv->cm.num_frags; ++i)
343 sge[i].length = PAGE_SIZE;
344
345 wr->next = NULL;
346 wr->sg_list = sge;
347 wr->num_sge = priv->cm.num_frags;
348 }
349
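/*
 * Set up a per-connection receive ring for devices without an SRQ:
 * enforce the max_nonsrq_conn_qp limit, allocate ipoib_recvq_size
 * buffers and post them all on the connection's own QP.
 */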
350 static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
351 struct ipoib_cm_rx *rx)
352 {
353 struct ipoib_dev_priv *priv = ipoib_priv(dev);
354 struct {
355 struct ib_recv_wr wr;
356 struct ib_sge sge[IPOIB_CM_RX_SG];
357 } *t;
358 int ret;
359 int i;
360
361 rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
362 if (!rx->rx_ring)
363 return -ENOMEM;
364
365 t = kmalloc(sizeof *t, GFP_KERNEL);
366 if (!t) {
367 ret = -ENOMEM;
368 goto err_free_1;
369 }
370
371 ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);
372
373 spin_lock_irq(&priv->lock);
374
375 if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
376 spin_unlock_irq(&priv->lock);
377 ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
378 ret = -EINVAL;
379 goto err_free;
380 } else
381 ++priv->cm.nonsrq_conn_qp;
382
383 spin_unlock_irq(&priv->lock);
384
385 for (i = 0; i < ipoib_recvq_size; ++i) {
386 if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
387 rx->rx_ring[i].mapping,
388 GFP_KERNEL)) {
389 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
390 ret = -ENOMEM;
391 goto err_count;
392 }
393 ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
394 if (ret) {
395 ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
396 "failed for buf %d\n", i);
397 ret = -EIO;
398 goto err_count;
399 }
400 }
401
402 rx->recv_count = ipoib_recvq_size;
403
404 kfree(t);
405
406 return 0;
407
408 err_count:
409 spin_lock_irq(&priv->lock);
410 --priv->cm.nonsrq_conn_qp;
411 spin_unlock_irq(&priv->lock);
412
413 err_free:
414 kfree(t);
415
416 err_free_1:
417 ipoib_cm_free_rx_ring(dev, rx->rx_ring);
418
419 return ret;
420 }
421
422 static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
423 struct ib_qp *qp, struct ib_cm_req_event_param *req,
424 unsigned psn)
425 {
426 struct ipoib_dev_priv *priv = ipoib_priv(dev);
427 struct ipoib_cm_data data = {};
428 struct ib_cm_rep_param rep = {};
429
430 data.qpn = cpu_to_be32(priv->qp->qp_num);
431 data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
432
433 rep.private_data = &data;
434 rep.private_data_len = sizeof data;
435 rep.flow_control = 0;
436 rep.rnr_retry_count = req->rnr_retry_count;
437 rep.srq = ipoib_cm_has_srq(dev);
438 rep.qp_num = qp->qp_num;
439 rep.starting_psn = psn;
440 return ib_send_cm_rep(cm_id, &rep);
441 }
442
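/*
 * Passive side of connection setup: a CM REQ arrived.  Create the
 * receive QP, bring it up (INIT -> RTR -> RTS), set up a private
 * receive ring if there is no SRQ, put the connection on the
 * passive_ids list for the stale timer, and answer with a REP.
 */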
443 static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
444 {
445 struct net_device *dev = cm_id->context;
446 struct ipoib_dev_priv *priv = ipoib_priv(dev);
447 struct ipoib_cm_rx *p;
448 unsigned psn;
449 int ret;
450
451 ipoib_dbg(priv, "REQ arrived\n");
452 p = kzalloc(sizeof *p, GFP_KERNEL);
453 if (!p)
454 return -ENOMEM;
455 p->dev = dev;
456 p->id = cm_id;
457 cm_id->context = p;
458 p->state = IPOIB_CM_RX_LIVE;
459 p->jiffies = jiffies;
460 INIT_LIST_HEAD(&p->list);
461
462 p->qp = ipoib_cm_create_rx_qp(dev, p);
463 if (IS_ERR(p->qp)) {
464 ret = PTR_ERR(p->qp);
465 goto err_qp;
466 }
467
468 psn = prandom_u32() & 0xffffff;
469 ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
470 if (ret)
471 goto err_modify;
472
473 if (!ipoib_cm_has_srq(dev)) {
474 ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
475 if (ret)
476 goto err_modify;
477 }
478
479 spin_lock_irq(&priv->lock);
480 queue_delayed_work(priv->wq,
481 &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
482 /* Add this entry to the passive_ids list head, but do not re-add
483 * it if IB_EVENT_QP_LAST_WQE_REACHED has moved it to the flush list. */
484 p->jiffies = jiffies;
485 if (p->state == IPOIB_CM_RX_LIVE)
486 list_move(&p->list, &priv->cm.passive_ids);
487 spin_unlock_irq(&priv->lock);
488
489 ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
490 if (ret) {
491 ipoib_warn(priv, "failed to send REP: %d\n", ret);
492 if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
493 ipoib_warn(priv, "unable to move qp to error state\n");
494 }
495 return 0;
496
497 err_modify:
498 ib_destroy_qp(p->qp);
499 err_qp:
500 kfree(p);
501 return ret;
502 }
503
504 static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
505 struct ib_cm_event *event)
506 {
507 struct ipoib_cm_rx *p;
508 struct ipoib_dev_priv *priv;
509
510 switch (event->event) {
511 case IB_CM_REQ_RECEIVED:
512 return ipoib_cm_req_handler(cm_id, event);
513 case IB_CM_DREQ_RECEIVED:
514 ib_send_cm_drep(cm_id, NULL, 0);
515 /* Fall through */
516 case IB_CM_REJ_RECEIVED:
517 p = cm_id->context;
518 priv = ipoib_priv(p->dev);
519 if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
520 ipoib_warn(priv, "unable to move qp to error state\n");
521 /* Fall through */
522 default:
523 return 0;
524 }
525 }
526 /* Adjust length of skb with fragments to match received data */
527 static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
528 unsigned int length, struct sk_buff *toskb)
529 {
530 int i, num_frags;
531 unsigned int size;
532
533 /* put header into skb */
534 size = min(length, hdr_space);
535 skb->tail += size;
536 skb->len += size;
537 length -= size;
538
539 num_frags = skb_shinfo(skb)->nr_frags;
540 for (i = 0; i < num_frags; i++) {
541 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
542
543 if (length == 0) {
544 /* don't need this page */
545 skb_fill_page_desc(toskb, i, skb_frag_page(frag),
546 0, PAGE_SIZE);
547 --skb_shinfo(skb)->nr_frags;
548 } else {
549 size = min(length, (unsigned) PAGE_SIZE);
550
551 skb_frag_size_set(frag, size);
552 skb->data_len += size;
553 skb->truesize += size;
554 skb->len += size;
555 length -= size;
556 }
557 }
558 }
559
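/*
 * Connected-mode receive completion handler.  Drain completions and
 * errors are handled first; small packets (< IPOIB_CM_COPYBREAK) are
 * copied into a freshly allocated skb so the large ring buffer can be
 * reposted untouched, while larger packets are handed up the stack and
 * replaced in the ring by a newly allocated buffer.
 */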
560 void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
561 {
562 struct ipoib_dev_priv *priv = ipoib_priv(dev);
563 struct ipoib_cm_rx_buf *rx_ring;
564 unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
565 struct sk_buff *skb, *newskb;
566 struct ipoib_cm_rx *p;
567 unsigned long flags;
568 u64 mapping[IPOIB_CM_RX_SG];
569 int frags;
570 int has_srq;
571 struct sk_buff *small_skb;
572
573 ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
574 wr_id, wc->status);
575
576 if (unlikely(wr_id >= ipoib_recvq_size)) {
577 if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
578 spin_lock_irqsave(&priv->lock, flags);
579 list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
580 ipoib_cm_start_rx_drain(priv);
581 queue_work(priv->wq, &priv->cm.rx_reap_task);
582 spin_unlock_irqrestore(&priv->lock, flags);
583 } else
584 ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
585 wr_id, ipoib_recvq_size);
586 return;
587 }
588
589 p = wc->qp->qp_context;
590
591 has_srq = ipoib_cm_has_srq(dev);
592 rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;
593
594 skb = rx_ring[wr_id].skb;
595
596 if (unlikely(wc->status != IB_WC_SUCCESS)) {
597 ipoib_dbg(priv, "cm recv error "
598 "(status=%d, wrid=%d vend_err %x)\n",
599 wc->status, wr_id, wc->vendor_err);
600 ++dev->stats.rx_dropped;
601 if (has_srq)
602 goto repost;
603 else {
604 if (!--p->recv_count) {
605 spin_lock_irqsave(&priv->lock, flags);
606 list_move(&p->list, &priv->cm.rx_reap_list);
607 spin_unlock_irqrestore(&priv->lock, flags);
608 queue_work(priv->wq, &priv->cm.rx_reap_task);
609 }
610 return;
611 }
612 }
613
614 if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
615 if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
616 spin_lock_irqsave(&priv->lock, flags);
617 p->jiffies = jiffies;
618 /* Move this entry to the list head, but do not re-add it
619 * if it has been moved off the list. */
620 if (p->state == IPOIB_CM_RX_LIVE)
621 list_move(&p->list, &priv->cm.passive_ids);
622 spin_unlock_irqrestore(&priv->lock, flags);
623 }
624 }
625
626 if (wc->byte_len < IPOIB_CM_COPYBREAK) {
627 int dlen = wc->byte_len;
628
629 small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
630 if (small_skb) {
631 skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
632 ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
633 dlen, DMA_FROM_DEVICE);
634 skb_copy_from_linear_data(skb, small_skb->data, dlen);
635 ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
636 dlen, DMA_FROM_DEVICE);
637 skb_put(small_skb, dlen);
638 skb = small_skb;
639 goto copied;
640 }
641 }
642
643 frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
644 (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
645
646 newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
647 mapping, GFP_ATOMIC);
648 if (unlikely(!newskb)) {
649 /*
650 * If we can't allocate a new RX buffer, dump
651 * this packet and reuse the old buffer.
652 */
653 ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
654 ++dev->stats.rx_dropped;
655 goto repost;
656 }
657
658 ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
659 memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
660
661 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
662 wc->byte_len, wc->slid);
663
664 skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
665
666 copied:
667 skb->protocol = ((struct ipoib_header *) skb->data)->proto;
668 skb_add_pseudo_hdr(skb);
669
670 ++dev->stats.rx_packets;
671 dev->stats.rx_bytes += skb->len;
672
673 skb->dev = dev;
674 /* XXX get correct PACKET_ type here */
675 skb->pkt_type = PACKET_HOST;
676 netif_receive_skb(skb);
677
678 repost:
679 if (has_srq) {
680 if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
681 ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
682 "for buf %d\n", wr_id);
683 } else {
684 if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
685 &priv->cm.rx_wr,
686 priv->cm.rx_sge,
687 wr_id))) {
688 --p->recv_count;
689 ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
690 "for buf %d\n", wr_id);
691 }
692 }
693 }
694
695 static inline int post_send(struct ipoib_dev_priv *priv,
696 struct ipoib_cm_tx *tx,
697 unsigned int wr_id,
698 struct ipoib_tx_buf *tx_req)
699 {
700 struct ib_send_wr *bad_wr;
701
702 ipoib_build_sge(priv, tx_req);
703
704 priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
705
706 return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
707 }
708
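/*
 * Transmit one skb on a connected-mode QP.  Oversized packets are
 * dropped and reported via ipoib_cm_skb_too_long() so the sender gets
 * a path-MTU update; skbs with more fragments than the QP has send
 * SGEs are linearized.  The skb is recorded in the TX ring before the
 * send is posted, and the net queue is stopped when the ring fills up.
 */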
709 void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
710 {
711 struct ipoib_dev_priv *priv = ipoib_priv(dev);
712 struct ipoib_tx_buf *tx_req;
713 int rc;
714 unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);
715
716 if (unlikely(skb->len > tx->mtu)) {
717 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
718 skb->len, tx->mtu);
719 ++dev->stats.tx_dropped;
720 ++dev->stats.tx_errors;
721 ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
722 return;
723 }
724 if (skb_shinfo(skb)->nr_frags > usable_sge) {
725 if (skb_linearize(skb) < 0) {
726 ipoib_warn(priv, "skb could not be linearized\n");
727 ++dev->stats.tx_dropped;
728 ++dev->stats.tx_errors;
729 dev_kfree_skb_any(skb);
730 return;
731 }
732 /* Does skb_linearize return ok without reducing nr_frags? */
733 if (skb_shinfo(skb)->nr_frags > usable_sge) {
734 ipoib_warn(priv, "too many frags after skb linearize\n");
735 ++dev->stats.tx_dropped;
736 ++dev->stats.tx_errors;
737 dev_kfree_skb_any(skb);
738 return;
739 }
740 }
741 ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
742 tx->tx_head, skb->len, tx->qp->qp_num);
743
744 /*
745 * We put the skb into the tx_ring _before_ we call post_send()
746 * because it's entirely possible that the completion handler will
747 * run before we execute anything after the post_send(). That
748 * means we have to make sure everything is properly recorded and
749 * our state is consistent before we call post_send().
750 */
751 tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
752 tx_req->skb = skb;
753
754 if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
755 ++dev->stats.tx_errors;
756 dev_kfree_skb_any(skb);
757 return;
758 }
759
760 skb_orphan(skb);
761 skb_dst_drop(skb);
762
763 rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
764 if (unlikely(rc)) {
765 ipoib_warn(priv, "post_send failed, error %d\n", rc);
766 ++dev->stats.tx_errors;
767 ipoib_dma_unmap_tx(priv, tx_req);
768 dev_kfree_skb_any(skb);
769 } else {
770 netif_trans_update(dev);
771 ++tx->tx_head;
772
773 if (++priv->tx_outstanding == ipoib_sendq_size) {
774 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
775 tx->qp->qp_num);
776 netif_stop_queue(dev);
777 rc = ib_req_notify_cq(priv->send_cq,
778 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
779 if (rc < 0)
780 ipoib_warn(priv, "request notify on send CQ failed\n");
781 else if (rc)
782 ipoib_send_comp_handler(priv->send_cq, dev);
783 }
784 }
785 }
786
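/*
 * Connected-mode send completion handler: unmap and free the skb,
 * advance tx_tail, and wake the net queue once the number of
 * outstanding sends drops to half the ring size.  Any error other than
 * a flush tears the connection down by queueing it for the reap task.
 */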
787 void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
788 {
789 struct ipoib_dev_priv *priv = ipoib_priv(dev);
790 struct ipoib_cm_tx *tx = wc->qp->qp_context;
791 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
792 struct ipoib_tx_buf *tx_req;
793 unsigned long flags;
794
795 ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
796 wr_id, wc->status);
797
798 if (unlikely(wr_id >= ipoib_sendq_size)) {
799 ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
800 wr_id, ipoib_sendq_size);
801 return;
802 }
803
804 tx_req = &tx->tx_ring[wr_id];
805
806 ipoib_dma_unmap_tx(priv, tx_req);
807
808 /* FIXME: is this right? Shouldn't we only increment on success? */
809 ++dev->stats.tx_packets;
810 dev->stats.tx_bytes += tx_req->skb->len;
811
812 dev_kfree_skb_any(tx_req->skb);
813
814 netif_tx_lock(dev);
815
816 ++tx->tx_tail;
817 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
818 netif_queue_stopped(dev) &&
819 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
820 netif_wake_queue(dev);
821
822 if (wc->status != IB_WC_SUCCESS &&
823 wc->status != IB_WC_WR_FLUSH_ERR) {
824 struct ipoib_neigh *neigh;
825
826 /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle,
827 * so don't make waves.
828 */
829 if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
830 wc->status == IB_WC_RETRY_EXC_ERR)
831 ipoib_dbg(priv,
832 "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
833 __func__, wc->status, wr_id, wc->vendor_err);
834 else
835 ipoib_warn(priv,
836 "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
837 __func__, wc->status, wr_id, wc->vendor_err);
838
839 spin_lock_irqsave(&priv->lock, flags);
840 neigh = tx->neigh;
841
842 if (neigh) {
843 neigh->cm = NULL;
844 ipoib_neigh_free(neigh);
845
846 tx->neigh = NULL;
847 }
848
849 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
850 list_move(&tx->list, &priv->cm.reap_list);
851 queue_work(priv->wq, &priv->cm.reap_task);
852 }
853
854 clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
855
856 spin_unlock_irqrestore(&priv->lock, flags);
857 }
858
859 netif_tx_unlock(dev);
860 }
861
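/*
 * Start listening for connected-mode connections: create the CM ID and
 * listen on the service ID formed from IPOIB_CM_IETF_ID and the local
 * UD QP number.
 */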
862 int ipoib_cm_dev_open(struct net_device *dev)
863 {
864 struct ipoib_dev_priv *priv = ipoib_priv(dev);
865 int ret;
866
867 if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
868 return 0;
869
870 priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
871 if (IS_ERR(priv->cm.id)) {
872 printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
873 ret = PTR_ERR(priv->cm.id);
874 goto err_cm;
875 }
876
877 ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
878 0);
879 if (ret) {
880 printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
881 IPOIB_CM_IETF_ID | priv->qp->qp_num);
882 goto err_listen;
883 }
884
885 return 0;
886
887 err_listen:
888 ib_destroy_cm_id(priv->cm.id);
889 err_cm:
890 priv->cm.id = NULL;
891 return ret;
892 }
893
894 static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
895 {
896 struct ipoib_dev_priv *priv = ipoib_priv(dev);
897 struct ipoib_cm_rx *rx, *n;
898 LIST_HEAD(list);
899
900 spin_lock_irq(&priv->lock);
901 list_splice_init(&priv->cm.rx_reap_list, &list);
902 spin_unlock_irq(&priv->lock);
903
904 list_for_each_entry_safe(rx, n, &list, list) {
905 ib_destroy_cm_id(rx->id);
906 ib_destroy_qp(rx->qp);
907 if (!ipoib_cm_has_srq(dev)) {
908 ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
909 spin_lock_irq(&priv->lock);
910 --priv->cm.nonsrq_conn_qp;
911 spin_unlock_irq(&priv->lock);
912 }
913 kfree(rx);
914 }
915 }
916
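/*
 * Shut connected mode down: destroy the listening CM ID, move every
 * passive connection to the error state, wait up to five seconds for
 * their receive queues to drain, and then reap all connection state.
 */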
917 void ipoib_cm_dev_stop(struct net_device *dev)
918 {
919 struct ipoib_dev_priv *priv = ipoib_priv(dev);
920 struct ipoib_cm_rx *p;
921 unsigned long begin;
922 int ret;
923
924 if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
925 return;
926
927 ib_destroy_cm_id(priv->cm.id);
928 priv->cm.id = NULL;
929
930 spin_lock_irq(&priv->lock);
931 while (!list_empty(&priv->cm.passive_ids)) {
932 p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
933 list_move(&p->list, &priv->cm.rx_error_list);
934 p->state = IPOIB_CM_RX_ERROR;
935 spin_unlock_irq(&priv->lock);
936 ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
937 if (ret)
938 ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
939 spin_lock_irq(&priv->lock);
940 }
941
942 /* Wait for all RX to be drained */
943 begin = jiffies;
944
945 while (!list_empty(&priv->cm.rx_error_list) ||
946 !list_empty(&priv->cm.rx_flush_list) ||
947 !list_empty(&priv->cm.rx_drain_list)) {
948 if (time_after(jiffies, begin + 5 * HZ)) {
949 ipoib_warn(priv, "RX drain timing out\n");
950
951 /*
952 * assume the HW is wedged and just free up everything.
953 */
954 list_splice_init(&priv->cm.rx_flush_list,
955 &priv->cm.rx_reap_list);
956 list_splice_init(&priv->cm.rx_error_list,
957 &priv->cm.rx_reap_list);
958 list_splice_init(&priv->cm.rx_drain_list,
959 &priv->cm.rx_reap_list);
960 break;
961 }
962 spin_unlock_irq(&priv->lock);
963 usleep_range(1000, 2000);
964 ipoib_drain_cq(dev);
965 spin_lock_irq(&priv->lock);
966 }
967
968 spin_unlock_irq(&priv->lock);
969
970 ipoib_cm_free_rx_reap_list(dev);
971
972 cancel_delayed_work(&priv->cm.stale_task);
973 }
974
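/*
 * Active side of connection setup: a CM REP arrived.  Validate the MTU
 * advertised by the peer, move the TX QP through RTR and RTS, flush
 * any packets that were queued on the neighbour while the connection
 * was being established, and confirm with an RTU.
 */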
975 static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
976 {
977 struct ipoib_cm_tx *p = cm_id->context;
978 struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
979 struct ipoib_cm_data *data = event->private_data;
980 struct sk_buff_head skqueue;
981 struct ib_qp_attr qp_attr;
982 int qp_attr_mask, ret;
983 struct sk_buff *skb;
984
985 p->mtu = be32_to_cpu(data->mtu);
986
987 if (p->mtu <= IPOIB_ENCAP_LEN) {
988 ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
989 p->mtu, IPOIB_ENCAP_LEN);
990 return -EINVAL;
991 }
992
993 qp_attr.qp_state = IB_QPS_RTR;
994 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
995 if (ret) {
996 ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
997 return ret;
998 }
999
1000 qp_attr.rq_psn = 0 /* FIXME */;
1001 ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
1002 if (ret) {
1003 ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
1004 return ret;
1005 }
1006
1007 qp_attr.qp_state = IB_QPS_RTS;
1008 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
1009 if (ret) {
1010 ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
1011 return ret;
1012 }
1013 ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
1014 if (ret) {
1015 ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
1016 return ret;
1017 }
1018
1019 skb_queue_head_init(&skqueue);
1020
1021 spin_lock_irq(&priv->lock);
1022 set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
1023 if (p->neigh)
1024 while ((skb = __skb_dequeue(&p->neigh->queue)))
1025 __skb_queue_tail(&skqueue, skb);
1026 spin_unlock_irq(&priv->lock);
1027
1028 while ((skb = __skb_dequeue(&skqueue))) {
1029 skb->dev = p->dev;
1030 ret = dev_queue_xmit(skb);
1031 if (ret)
1032 ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
1033 __func__, ret);
1034 }
1035
1036 ret = ib_send_cm_rtu(cm_id, NULL, 0);
1037 if (ret) {
1038 ipoib_warn(priv, "failed to send RTU: %d\n", ret);
1039 return ret;
1040 }
1041 return 0;
1042 }
1043
1044 static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
1045 {
1046 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1047 struct ib_qp_init_attr attr = {
1048 .send_cq = priv->recv_cq,
1049 .recv_cq = priv->recv_cq,
1050 .srq = priv->cm.srq,
1051 .cap.max_send_wr = ipoib_sendq_size,
1052 .cap.max_send_sge = 1,
1053 .sq_sig_type = IB_SIGNAL_ALL_WR,
1054 .qp_type = IB_QPT_RC,
1055 .qp_context = tx,
1056 .create_flags = 0
1057 };
1058 struct ib_qp *tx_qp;
1059
1060 if (dev->features & NETIF_F_SG)
1061 attr.cap.max_send_sge =
1062 min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
1063
1064 tx_qp = ib_create_qp(priv->pd, &attr);
1065 tx->max_send_sge = attr.cap.max_send_sge;
1066 return tx_qp;
1067 }
1068
1069 static int ipoib_cm_send_req(struct net_device *dev,
1070 struct ib_cm_id *id, struct ib_qp *qp,
1071 u32 qpn,
1072 struct sa_path_rec *pathrec)
1073 {
1074 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1075 struct ipoib_cm_data data = {};
1076 struct ib_cm_req_param req = {};
1077
1078 data.qpn = cpu_to_be32(priv->qp->qp_num);
1079 data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
1080
1081 req.primary_path = pathrec;
1082 req.alternate_path = NULL;
1083 req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
1084 req.qp_num = qp->qp_num;
1085 req.qp_type = qp->qp_type;
1086 req.private_data = &data;
1087 req.private_data_len = sizeof data;
1088 req.flow_control = 0;
1089
1090 req.starting_psn = 0; /* FIXME */
1091
1092 /*
1093 * Pick some arbitrary defaults here; we could make these
1094 * module parameters if anyone cared about setting them.
1095 */
1096 req.responder_resources = 4;
1097 req.remote_cm_response_timeout = 20;
1098 req.local_cm_response_timeout = 20;
1099 req.retry_count = 0; /* RFC draft warns against retries */
1100 req.rnr_retry_count = 0; /* RFC draft warns against retries */
1101 req.max_cm_retries = 15;
1102 req.srq = ipoib_cm_has_srq(dev);
1103 return ib_send_cm_req(id, &req);
1104 }
1105
1106 static int ipoib_cm_modify_tx_init(struct net_device *dev,
1107 struct ib_cm_id *cm_id, struct ib_qp *qp)
1108 {
1109 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1110 struct ib_qp_attr qp_attr;
1111 int qp_attr_mask, ret;
1112 ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
1113 if (ret) {
1114 ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
1115 return ret;
1116 }
1117
1118 qp_attr.qp_state = IB_QPS_INIT;
1119 qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
1120 qp_attr.port_num = priv->port;
1121 qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
1122
1123 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
1124 if (ret) {
1125 ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
1126 return ret;
1127 }
1128 return 0;
1129 }
1130
1131 static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
1132 struct sa_path_rec *pathrec)
1133 {
1134 struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
1135 unsigned int noio_flag;
1136 int ret;
1137
1138 noio_flag = memalloc_noio_save();
1139 p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
1140 if (!p->tx_ring) {
1141 ret = -ENOMEM;
1142 goto err_tx;
1143 }
1144 memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
1145
1146 p->qp = ipoib_cm_create_tx_qp(p->dev, p);
1147 memalloc_noio_restore(noio_flag);
1148 if (IS_ERR(p->qp)) {
1149 ret = PTR_ERR(p->qp);
1150 ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
1151 goto err_qp;
1152 }
1153
1154 p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
1155 if (IS_ERR(p->id)) {
1156 ret = PTR_ERR(p->id);
1157 ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
1158 goto err_id;
1159 }
1160
1161 ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
1162 if (ret) {
1163 ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
1164 goto err_modify_send;
1165 }
1166
1167 ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
1168 if (ret) {
1169 ipoib_warn(priv, "failed to send cm req: %d\n", ret);
1170 goto err_modify_send;
1171 }
1172
1173 ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
1174 p->qp->qp_num, pathrec->dgid.raw, qpn);
1175
1176 return 0;
1177
1178 err_modify_send:
1179 ib_destroy_cm_id(p->id);
1180 err_id:
1181 p->id = NULL;
1182 ib_destroy_qp(p->qp);
1183 err_qp:
1184 p->qp = NULL;
1185 vfree(p->tx_ring);
1186 err_tx:
1187 return ret;
1188 }
1189
1190 static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
1191 {
1192 struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
1193 struct ipoib_tx_buf *tx_req;
1194 unsigned long begin;
1195
1196 ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
1197 p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);
1198
1199 if (p->id)
1200 ib_destroy_cm_id(p->id);
1201
1202 if (p->tx_ring) {
1203 /* Wait for all sends to complete */
1204 begin = jiffies;
1205 while ((int) p->tx_tail - (int) p->tx_head < 0) {
1206 if (time_after(jiffies, begin + 5 * HZ)) {
1207 ipoib_warn(priv, "timing out; %d sends not completed\n",
1208 p->tx_head - p->tx_tail);
1209 goto timeout;
1210 }
1211
1212 usleep_range(1000, 2000);
1213 }
1214 }
1215
1216 timeout:
1217
1218 while ((int) p->tx_tail - (int) p->tx_head < 0) {
1219 tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
1220 ipoib_dma_unmap_tx(priv, tx_req);
1221 dev_kfree_skb_any(tx_req->skb);
1222 ++p->tx_tail;
1223 netif_tx_lock_bh(p->dev);
1224 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
1225 netif_queue_stopped(p->dev) &&
1226 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
1227 netif_wake_queue(p->dev);
1228 netif_tx_unlock_bh(p->dev);
1229 }
1230
1231 if (p->qp)
1232 ib_destroy_qp(p->qp);
1233
1234 vfree(p->tx_ring);
1235 kfree(p);
1236 }
1237
1238 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
1239 struct ib_cm_event *event)
1240 {
1241 struct ipoib_cm_tx *tx = cm_id->context;
1242 struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
1243 struct net_device *dev = priv->dev;
1244 struct ipoib_neigh *neigh;
1245 unsigned long flags;
1246 int ret;
1247
1248 switch (event->event) {
1249 case IB_CM_DREQ_RECEIVED:
1250 ipoib_dbg(priv, "DREQ received.\n");
1251 ib_send_cm_drep(cm_id, NULL, 0);
1252 break;
1253 case IB_CM_REP_RECEIVED:
1254 ipoib_dbg(priv, "REP received.\n");
1255 ret = ipoib_cm_rep_handler(cm_id, event);
1256 if (ret)
1257 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
1258 NULL, 0, NULL, 0);
1259 break;
1260 case IB_CM_REQ_ERROR:
1261 case IB_CM_REJ_RECEIVED:
1262 case IB_CM_TIMEWAIT_EXIT:
1263 ipoib_dbg(priv, "CM error %d.\n", event->event);
1264 netif_tx_lock_bh(dev);
1265 spin_lock_irqsave(&priv->lock, flags);
1266 neigh = tx->neigh;
1267
1268 if (neigh) {
1269 neigh->cm = NULL;
1270 ipoib_neigh_free(neigh);
1271
1272 tx->neigh = NULL;
1273 }
1274
1275 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
1276 list_move(&tx->list, &priv->cm.reap_list);
1277 queue_work(priv->wq, &priv->cm.reap_task);
1278 }
1279
1280 spin_unlock_irqrestore(&priv->lock, flags);
1281 netif_tx_unlock_bh(dev);
1282 break;
1283 default:
1284 break;
1285 }
1286
1287 return 0;
1288 }
1289
1290 struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
1291 struct ipoib_neigh *neigh)
1292 {
1293 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1294 struct ipoib_cm_tx *tx;
1295
1296 tx = kzalloc(sizeof *tx, GFP_ATOMIC);
1297 if (!tx)
1298 return NULL;
1299
1300 neigh->cm = tx;
1301 tx->neigh = neigh;
1302 tx->path = path;
1303 tx->dev = dev;
1304 list_add(&tx->list, &priv->cm.start_list);
1305 set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
1306 queue_work(priv->wq, &priv->cm.start_task);
1307 return tx;
1308 }
1309
1310 void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
1311 {
1312 struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
1313 unsigned long flags;
1314 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
1315 spin_lock_irqsave(&priv->lock, flags);
1316 list_move(&tx->list, &priv->cm.reap_list);
1317 queue_work(priv->wq, &priv->cm.reap_task);
1318 ipoib_dbg(priv, "Reap connection for gid %pI6\n",
1319 tx->neigh->daddr + 4);
1320 tx->neigh = NULL;
1321 spin_unlock_irqrestore(&priv->lock, flags);
1322 }
1323 }
1324
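/*
 * The IPoIB hardware address stores the QPN and option flags in its
 * first four bytes, followed by the GID; skipping
 * QPN_AND_OPTIONS_OFFSET bytes of the neighbour address yields the GID
 * used for the path lookup below.
 */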
1325 #define QPN_AND_OPTIONS_OFFSET 4
1326
1327 static void ipoib_cm_tx_start(struct work_struct *work)
1328 {
1329 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1330 cm.start_task);
1331 struct net_device *dev = priv->dev;
1332 struct ipoib_neigh *neigh;
1333 struct ipoib_cm_tx *p;
1334 unsigned long flags;
1335 struct ipoib_path *path;
1336 int ret;
1337
1338 struct sa_path_rec pathrec;
1339 u32 qpn;
1340
1341 netif_tx_lock_bh(dev);
1342 spin_lock_irqsave(&priv->lock, flags);
1343
1344 while (!list_empty(&priv->cm.start_list)) {
1345 p = list_entry(priv->cm.start_list.next, typeof(*p), list);
1346 list_del_init(&p->list);
1347 neigh = p->neigh;
1348
1349 qpn = IPOIB_QPN(neigh->daddr);
1350 /*
1351 * As long as the lookup is done while holding these two locks,
1352 * the path's existence guarantees that it is still valid.
1353 */
1354 path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
1355 if (!path) {
1356 pr_info("%s ignore not valid path %pI6\n",
1357 __func__,
1358 neigh->daddr + QPN_AND_OPTIONS_OFFSET);
1359 goto free_neigh;
1360 }
1361 memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
1362
1363 spin_unlock_irqrestore(&priv->lock, flags);
1364 netif_tx_unlock_bh(dev);
1365
1366 ret = ipoib_cm_tx_init(p, qpn, &pathrec);
1367
1368 netif_tx_lock_bh(dev);
1369 spin_lock_irqsave(&priv->lock, flags);
1370
1371 if (ret) {
1372 free_neigh:
1373 neigh = p->neigh;
1374 if (neigh) {
1375 neigh->cm = NULL;
1376 ipoib_neigh_free(neigh);
1377 }
1378 list_del(&p->list);
1379 kfree(p);
1380 }
1381 }
1382
1383 spin_unlock_irqrestore(&priv->lock, flags);
1384 netif_tx_unlock_bh(dev);
1385 }
1386
1387 static void ipoib_cm_tx_reap(struct work_struct *work)
1388 {
1389 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1390 cm.reap_task);
1391 struct net_device *dev = priv->dev;
1392 struct ipoib_cm_tx *p;
1393 unsigned long flags;
1394
1395 netif_tx_lock_bh(dev);
1396 spin_lock_irqsave(&priv->lock, flags);
1397
1398 while (!list_empty(&priv->cm.reap_list)) {
1399 p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
1400 list_del_init(&p->list);
1401 spin_unlock_irqrestore(&priv->lock, flags);
1402 netif_tx_unlock_bh(dev);
1403 ipoib_cm_tx_destroy(p);
1404 netif_tx_lock_bh(dev);
1405 spin_lock_irqsave(&priv->lock, flags);
1406 }
1407
1408 spin_unlock_irqrestore(&priv->lock, flags);
1409 netif_tx_unlock_bh(dev);
1410 }
1411
1412 static void ipoib_cm_skb_reap(struct work_struct *work)
1413 {
1414 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1415 cm.skb_task);
1416 struct net_device *dev = priv->dev;
1417 struct sk_buff *skb;
1418 unsigned long flags;
1419 unsigned mtu = priv->mcast_mtu;
1420
1421 netif_tx_lock_bh(dev);
1422 spin_lock_irqsave(&priv->lock, flags);
1423
1424 while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
1425 spin_unlock_irqrestore(&priv->lock, flags);
1426 netif_tx_unlock_bh(dev);
1427
1428 if (skb->protocol == htons(ETH_P_IP))
1429 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1430 #if IS_ENABLED(CONFIG_IPV6)
1431 else if (skb->protocol == htons(ETH_P_IPV6))
1432 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1433 #endif
1434 dev_kfree_skb_any(skb);
1435
1436 netif_tx_lock_bh(dev);
1437 spin_lock_irqsave(&priv->lock, flags);
1438 }
1439
1440 spin_unlock_irqrestore(&priv->lock, flags);
1441 netif_tx_unlock_bh(dev);
1442 }
1443
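/*
 * Called when a packet is too large for the connection MTU: update the
 * path MTU on the skb's dst entry and queue the skb so the skb_task
 * work item can send the appropriate ICMP "fragmentation needed" /
 * "packet too big" error back to the sender.
 */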
1444 void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
1445 unsigned int mtu)
1446 {
1447 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1448 int e = skb_queue_empty(&priv->cm.skb_queue);
1449
1450 if (skb_dst(skb))
1451 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1452
1453 skb_queue_tail(&priv->cm.skb_queue, skb);
1454 if (e)
1455 queue_work(priv->wq, &priv->cm.skb_task);
1456 }
1457
1458 static void ipoib_cm_rx_reap(struct work_struct *work)
1459 {
1460 ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
1461 cm.rx_reap_task)->dev);
1462 }
1463
1464 static void ipoib_cm_stale_task(struct work_struct *work)
1465 {
1466 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1467 cm.stale_task.work);
1468 struct ipoib_cm_rx *p;
1469 int ret;
1470
1471 spin_lock_irq(&priv->lock);
1472 while (!list_empty(&priv->cm.passive_ids)) {
1473 /* List is sorted by LRU, start from tail,
1474 * stop when we see a recently used entry */
1475 p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
1476 if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
1477 break;
1478 list_move(&p->list, &priv->cm.rx_error_list);
1479 p->state = IPOIB_CM_RX_ERROR;
1480 spin_unlock_irq(&priv->lock);
1481 ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
1482 if (ret)
1483 ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
1484 spin_lock_irq(&priv->lock);
1485 }
1486
1487 if (!list_empty(&priv->cm.passive_ids))
1488 queue_delayed_work(priv->wq,
1489 &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
1490 spin_unlock_irq(&priv->lock);
1491 }
1492
1493 static ssize_t show_mode(struct device *d, struct device_attribute *attr,
1494 char *buf)
1495 {
1496 struct net_device *dev = to_net_dev(d);
1497 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1498
1499 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
1500 return sprintf(buf, "connected\n");
1501 else
1502 return sprintf(buf, "datagram\n");
1503 }
1504
1505 static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1506 const char *buf, size_t count)
1507 {
1508 struct net_device *dev = to_net_dev(d);
1509 int ret;
1510 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1511
1512 if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
1513 return -EPERM;
1514
1515 if (!mutex_trylock(&priv->sysfs_mutex))
1516 return restart_syscall();
1517
1518 if (!rtnl_trylock()) {
1519 mutex_unlock(&priv->sysfs_mutex);
1520 return restart_syscall();
1521 }
1522
1523 ret = ipoib_set_mode(dev, buf);
1524
1525 /* ipoib_set_mode() is assumed to return with the rtnl lock still
1526 * held; only if it returned -EBUSY has it already dropped the lock,
1527 * in which case there is no need to call rtnl_unlock().
1528 */
1529 if (ret != -EBUSY)
1530 rtnl_unlock();
1531 mutex_unlock(&priv->sysfs_mutex);
1532
1533 return (!ret || ret == -EBUSY) ? count : ret;
1534 }
1535
1536 static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
1537
1538 int ipoib_cm_add_mode_attr(struct net_device *dev)
1539 {
1540 return device_create_file(&dev->dev, &dev_attr_mode);
1541 }
1542
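/*
 * Try to create a single SRQ shared by all connected-mode receive QPs.
 * If the HCA does not support SRQs (ib_create_srq() returns -ENOSYS),
 * fail silently; the rest of the code then falls back to
 * per-connection receive rings.
 */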
1543 static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
1544 {
1545 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1546 struct ib_srq_init_attr srq_init_attr = {
1547 .srq_type = IB_SRQT_BASIC,
1548 .attr = {
1549 .max_wr = ipoib_recvq_size,
1550 .max_sge = max_sge
1551 }
1552 };
1553
1554 priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
1555 if (IS_ERR(priv->cm.srq)) {
1556 if (PTR_ERR(priv->cm.srq) != -ENOSYS)
1557 printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
1558 priv->ca->name, PTR_ERR(priv->cm.srq));
1559 priv->cm.srq = NULL;
1560 return;
1561 }
1562
1563 priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
1564 if (!priv->cm.srq_ring) {
1565 ib_destroy_srq(priv->cm.srq);
1566 priv->cm.srq = NULL;
1567 return;
1568 }
1569
1570 }
1571
1572 int ipoib_cm_dev_init(struct net_device *dev)
1573 {
1574 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1575 int max_srq_sge, i;
1576
1577 INIT_LIST_HEAD(&priv->cm.passive_ids);
1578 INIT_LIST_HEAD(&priv->cm.reap_list);
1579 INIT_LIST_HEAD(&priv->cm.start_list);
1580 INIT_LIST_HEAD(&priv->cm.rx_error_list);
1581 INIT_LIST_HEAD(&priv->cm.rx_flush_list);
1582 INIT_LIST_HEAD(&priv->cm.rx_drain_list);
1583 INIT_LIST_HEAD(&priv->cm.rx_reap_list);
1584 INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
1585 INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
1586 INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
1587 INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
1588 INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);
1589
1590 skb_queue_head_init(&priv->cm.skb_queue);
1591
1592 ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge);
1593
1594 max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge);
1595 ipoib_cm_create_srq(dev, max_srq_sge);
1596 if (ipoib_cm_has_srq(dev)) {
1597 priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10;
1598 priv->cm.num_frags = max_srq_sge;
1599 ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
1600 priv->cm.max_cm_mtu, priv->cm.num_frags);
1601 } else {
1602 priv->cm.max_cm_mtu = IPOIB_CM_MTU;
1603 priv->cm.num_frags = IPOIB_CM_RX_SG;
1604 }
1605
1606 ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);
1607
1608 if (ipoib_cm_has_srq(dev)) {
1609 for (i = 0; i < ipoib_recvq_size; ++i) {
1610 if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
1611 priv->cm.num_frags - 1,
1612 priv->cm.srq_ring[i].mapping,
1613 GFP_KERNEL)) {
1614 ipoib_warn(priv, "failed to allocate "
1615 "receive buffer %d\n", i);
1616 ipoib_cm_dev_cleanup(dev);
1617 return -ENOMEM;
1618 }
1619
1620 if (ipoib_cm_post_receive_srq(dev, i)) {
1621 ipoib_warn(priv, "ipoib_cm_post_receive_srq "
1622 "failed for buf %d\n", i);
1623 ipoib_cm_dev_cleanup(dev);
1624 return -EIO;
1625 }
1626 }
1627 }
1628
1629 priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
1630 return 0;
1631 }
1632
1633 void ipoib_cm_dev_cleanup(struct net_device *dev)
1634 {
1635 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1636 int ret;
1637
1638 if (!priv->cm.srq)
1639 return;
1640
1641 ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");
1642
1643 ret = ib_destroy_srq(priv->cm.srq);
1644 if (ret)
1645 ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);
1646
1647 priv->cm.srq = NULL;
1648 if (!priv->cm.srq_ring)
1649 return;
1650
1651 ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
1652 priv->cm.srq_ring = NULL;
1653 }