/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

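/*
 * Allocate an ipoib_ah and the underlying IB address handle.  On
 * failure the ERR_PTR from ib_create_ah() is propagated to the caller
 * through the returned pointer, so callers must check with IS_ERR().
 */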
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;
	struct ib_ah *vah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	vah = ib_create_ah(pd, attr);
	if (IS_ERR(vah)) {
		kfree(ah);
		ah = (struct ipoib_ah *)vah;
	} else {
		ah->ah = vah;
		ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
	}

	return ah;
}

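/*
 * kref release callback.  The AH may still be referenced by sends that
 * have not completed, so it is only queued on the dead_ahs list here;
 * the reaper task destroys it once tx_tail has passed ah->last_send.
 */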
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	ib_dma_unmap_single(priv->ca, mapping[0],
			    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
			    DMA_FROM_DEVICE);
}

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header.  So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 4);

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	priv->rx_ring[id].skb = skb;
	return skb;
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

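/*
 * Handle one receive completion: validate the wr_id, unmap the buffer,
 * classify the packet (host/broadcast/multicast) from the GRH, drop
 * our own HCA-replicated multicasts, pass the skb up via GRO, and
 * finally repost a fresh receive buffer in the same ring slot.
 */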
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];
	union ib_gid *dgid;
	union ib_gid *sgid;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);

	skb_put(skb, wc->byte_len);

	/* First byte of dgid signals multicast when 0xff */
	dgid = &((struct ib_grh *)skb->data)->dgid;

	if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	sgid = &((struct ib_grh *)skb->data)->sgid;

	/*
	 * Drop packets that this interface sent, ie multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
		int need_repost = 1;

		if ((wc->wc_flags & IB_WC_GRH) &&
		    sgid->global.interface_id != priv->local_gid.global.interface_id)
			need_repost = 0;

		if (need_repost) {
			dev_kfree_skb_any(skb);
			goto repost;
		}
	}

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	if ((dev->features & NETIF_F_RXCSUM) &&
	    likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_receive(&priv->napi, skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

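/*
 * DMA-map the linear head (if any) and every page fragment of a TX
 * skb.  On a partial failure, everything mapped so far is unwound and
 * -EIO is returned.
 */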
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca,
						   skb_frag_page(frag),
						   frag->page_offset, skb_frag_size(frag),
						   DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}

void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
			struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
				    DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ib_dma_unmap_page(priv->ca, mapping[i + off],
				  skb_frag_size(frag), DMA_TO_DEVICE);
	}
}

/*
 * As a result of a completion error the QP can be transferred to the
 * SQE state.  This function checks if the (send) QP is in the SQE
 * state and, if so, moves it back to the RTS state so that it is
 * functional again.
 */
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
	struct ipoib_qp_state_validate *qp_work =
		container_of(work, struct ipoib_qp_state_validate, work);

	struct ipoib_dev_priv *priv = qp_work->priv;
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
			   __func__, ret);
		goto free_res;
	}
	pr_info("%s: QP: 0x%x is in state: %d\n",
		__func__, priv->qp->qp_num, qp_attr.qp_state);

	/* currently we support only the SQE->RTS transition */
	if (qp_attr.qp_state == IB_QPS_SQE) {
		qp_attr.qp_state = IB_QPS_RTS;

		ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
		if (ret) {
			pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
				ret, priv->qp->qp_num);
			goto free_res;
		}
		pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
			__func__, priv->qp->qp_num);
	} else {
		pr_warn("QP (%d) will stay in state: %d\n",
			priv->qp->qp_num, qp_attr.qp_state);
	}

free_res:
	kfree(qp_work);
}

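/*
 * Handle one send completion: unmap the TX buffer, update stats, wake
 * the netdev queue once the ring drains to half full, and on any
 * non-flush error schedule the QP state validation work above.
 */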
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_qp_state_validate *qp_work;
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
		qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
		if (!qp_work) {
			ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n",
				   __func__, priv->qp->qp_num);
			return;
		}

		INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
		qp_work->priv = priv;
		queue_work(priv->wq, &qp_work->work);
	}
}

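/*
 * Poll up to MAX_SEND_CQE send completions; returns nonzero when the
 * batch was full, i.e. the send CQ may still hold more completions.
 */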
static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i)
		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

	return n == MAX_SEND_CQE;
}

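/*
 * NAPI poll: reap up to @budget receive completions in IPOIB_NUM_WC
 * chunks.  When fewer than budget are processed, completion
 * notification is re-armed with IB_CQ_REPORT_MISSED_EVENTS and polling
 * resumes if events raced with the re-arm.
 */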
int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done  = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else
				ipoib_cm_handle_tx_wc(priv->dev, wc);
		}

		if (n != t)
			break;
	}

	if (done < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}

	return done;
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	napi_schedule(&priv->napi);
}

static void drain_tx_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	netif_tx_lock(dev);
	while (poll_tx(priv))
		; /* nothing */

	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);

	netif_tx_unlock(dev);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

	mod_timer(&priv->poll_timer, jiffies);
}

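/*
 * Build the SG list for one TX request and post it.  When @head is
 * non-NULL the packet is posted as an LSO work request with the given
 * header, otherwise as a plain send.
 */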
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	struct sk_buff *skb = tx_req->skb;

	ipoib_build_sge(priv, tx_req);

	priv->tx_wr.wr.wr_id	= wr_id;
	priv->tx_wr.remote_qpn	= qpn;
	priv->tx_wr.ah		= address;

	if (head) {
		priv->tx_wr.mss		= skb_shinfo(skb)->gso_size;
		priv->tx_wr.header	= head;
		priv->tx_wr.hlen	= hlen;
		priv->tx_wr.wr.opcode	= IB_WR_LSO;
	} else
		priv->tx_wr.wr.opcode	= IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
}

void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen, rc;
	void *phead;
	unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return;
		}
		phead = NULL;
		hlen  = 0;
	}
	if (skb_shinfo(skb)->nr_frags > usable_sge) {
		if (skb_linearize(skb) < 0) {
			ipoib_warn(priv, "skb could not be linearized\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
		/* Does skb_linearize return ok without reducing nr_frags? */
		if (skb_shinfo(skb)->nr_frags > usable_sge) {
			ipoib_warn(priv, "too many frags after skb linearize\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		netif_stop_queue(dev);
	}

	skb_orphan(skb);
	skb_dst_drop(skb);

	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
		       address->ah, qpn, tx_req, phead, hlen);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else {
		netif_trans_update(dev);

		address->last_send = priv->tx_head;
		++priv->tx_head;
	}

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (poll_tx(priv))
			; /* nothing */
}

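/*
 * Destroy any dead AHs whose last_send has been overtaken by tx_tail,
 * i.e. that can no longer be referenced by an outstanding send.
 */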
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			ib_destroy_ah(ah->ah);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(priv->wq, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

static void ipoib_flush_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	cancel_delayed_work(&priv->ah_reap_task);
	flush_workqueue(priv->wq);
	ipoib_reap_ah(&priv->ah_reap_task.work);
}

static void ipoib_stop_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	ipoib_flush_ah(dev);
}

static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct net_device *)ctx);
}

int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
			   (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
		return -1;
	}

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		goto dev_stop;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		goto dev_stop;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(priv->wq, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_enable(&priv->napi);

	return 0;
dev_stop:
	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_enable(&priv->napi);
	ipoib_ib_dev_stop(dev);
	return -1;
}

void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!(priv->pkey & 0x7fff) ||
	    ib_find_pkey(priv->ca, priv->port, priv->pkey,
			 &priv->pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(dev);
}

int ipoib_ib_dev_down(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}

static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

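/*
 * Poll the receive CQ until it is empty, downgrading successful
 * completions to flush errors so that no packets reach the stack while
 * the device is going down, then drain the send CQ as well.
 */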
void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		; /* nothing */

	local_bh_enable();
}

int ipoib_ib_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_disable(&priv->napi);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize it
	 * when all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	ipoib_flush_ah(dev);

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
		    (unsigned long) dev);

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}

/*
 * Takes whatever value is in pkey index 0 and updates priv->pkey;
 * returns 0 if the pkey value was changed.
 */
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
	int result;
	u16 prev_pkey;

	prev_pkey = priv->pkey;
	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
	if (result) {
		ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
			   priv->port, result);
		return result;
	}

	/* Set the full membership bit in the fetched pkey. */
	priv->pkey |= 0x8000;

	if (prev_pkey != priv->pkey) {
		ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
			  prev_pkey, priv->pkey);
		/*
		 * Update the pkey in the broadcast address, while making sure to set
		 * the full membership bit, so that we join the right broadcast group.
		 */
		priv->dev->broadcast[8] = priv->pkey >> 8;
		priv->dev->broadcast[9] = priv->pkey & 0xff;
		return 0;
	}

	return 1;
}

/*
 * returns 0 if pkey value was found in a different slot.
 */
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
	u16 old_index = priv->pkey_index;

	priv->pkey_index = 0;
	ipoib_pkey_dev_check_presence(priv->dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
	    (old_index == priv->pkey_index))
		return 1;
	return 0;
}

/*
 * returns true if the device address of the ipoib interface has changed and the
 * new address is a valid one (i.e. in the gid table), returns false otherwise.
 */
static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
{
	union ib_gid search_gid;
	union ib_gid gid0;
	union ib_gid *netdev_gid;
	int err;
	u16 index;
	u8 port;
	bool ret = false;

	netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
	if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
		return false;

	netif_addr_lock_bh(priv->dev);

	/* The subnet prefix may have changed, update it now so we won't have
	 * to do it later
	 */
	priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
	netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
	search_gid.global.subnet_prefix = gid0.global.subnet_prefix;

	search_gid.global.interface_id = priv->local_gid.global.interface_id;

	netif_addr_unlock_bh(priv->dev);

	err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
			  priv->dev, &port, &index);

	netif_addr_lock_bh(priv->dev);

	if (search_gid.global.interface_id !=
	    priv->local_gid.global.interface_id)
		/* There was a change while we were looking up the gid, bail
		 * here and let the next work sort this out
		 */
		goto out;

	/* The next section of code needs some background:
	 * Per IB spec the port GUID can't change if the HCA is powered on.
	 * port GUID is the basis for GID at index 0 which is the basis for
	 * the default device address of an ipoib interface.
	 *
	 * so it seems the flow should be:
	 * if user_changed_dev_addr && gid in gid tbl
	 *	set bit dev_addr_set
	 *	return true
	 * else
	 *	return false
	 *
	 * The issue is that there are devices that don't follow the spec,
	 * they change the port GUID when the HCA is powered, so in order
	 * not to break userspace applications, we need to check if the
	 * user wanted to control the device address and we assume that
	 * if he sets the device address back to be based on GID index 0,
	 * he no longer wishes to control it.
	 *
	 * If the user doesn't control the device address,
	 * IPOIB_FLAG_DEV_ADDR_SET is set, and ib_find_gid failed, it means
	 * the port GUID has changed and the GID at index 0 has changed,
	 * so we need to change priv->local_gid and priv->dev->dev_addr
	 * to reflect the new GID.
	 */
	if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
		if (!err && port == priv->port) {
			set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
			if (index == 0)
				clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
					  &priv->flags);
			else
				set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
			ret = true;
		} else {
			ret = false;
		}
	} else {
		if (!err && port == priv->port) {
			ret = true;
		} else {
			if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
				memcpy(&priv->local_gid, &gid0,
				       sizeof(priv->local_gid));
				memcpy(priv->dev->dev_addr + 4, &gid0,
				       sizeof(priv->local_gid));
				ret = true;
			}
		}
	}

out:
	netif_addr_unlock_bh(priv->dev);

	return ret;
}

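/*
 * Core flush routine shared by the light/normal/heavy flush tasks
 * below.  A light flush re-validates the device address and multicast
 * state, a normal flush additionally brings the IB side down and back
 * up, and a heavy flush restarts the QP (e.g. after a P_Key change).
 * Child interfaces are flushed recursively first.
 */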
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level,
				 int nesting)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	int result;

	down_read_nested(&priv->vlan_rwsem, nesting);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level, nesting + 1);

	up_read(&priv->vlan_rwsem);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
	    level != IPOIB_FLUSH_HEAVY) {
		/* Make sure the dev_addr is set even if not flushing */
		if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		/* interface is down. update pkey and leave. */
		if (level == IPOIB_FLUSH_HEAVY) {
			if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
				update_parent_pkey(priv);
			else
				update_child_pkey(priv);
		} else if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		/* child devices chase their origin pkey value, while non-child
		 * (parent) devices should always take what is present in pkey index 0
		 */
		if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
			result = update_child_pkey(priv);
			if (result) {
				/* restart QP only if P_Key index is changed */
				ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
				return;
			}

		} else {
			result = update_parent_pkey(priv);
			/* restart QP only if P_Key value changed */
			if (result) {
				ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
				return;
			}
		}
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		int oper_up;
		ipoib_mark_paths_invalid(dev);
		/* Mark IPoIB operation as down to prevent races between
		 * the flush flow, which leaves the MCG, and on-the-fly joins
		 * that can happen during that time.  The mcast restart task
		 * should deal with join requests we missed.
		 */
		oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
		ipoib_mcast_dev_flush(dev);
		if (oper_up)
			set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
		ipoib_flush_ah(dev);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(dev);

	if (level == IPOIB_FLUSH_HEAVY) {
		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
			ipoib_ib_dev_stop(dev);
		if (ipoib_ib_dev_open(dev) != 0)
			return;
		if (netif_queue_stopped(dev))
			netif_start_queue(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here, don't bring it back up if it's not configured up
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(dev);
		if (ipoib_dev_addr_changed_valid(priv))
			ipoib_mcast_restart_task(&priv->restart_task);
	}
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_normal);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_heavy);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");
	/*
	 * We must make sure there are no more (path) completions
	 * that may wish to touch priv fields that are no longer valid
	 */
	ipoib_flush_paths(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	/*
	 * All of our ah references aren't free until after
	 * ipoib_mcast_dev_flush(), ipoib_flush_paths, and
	 * the neighbor garbage collection is stopped and reaped.
	 * That should all be done now, so make a final ah flush.
	 */
	ipoib_stop_ah(dev);

	ipoib_transport_dev_cleanup(dev);
}