/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_multicast.c 1362 2004-12-18 15:56:29Z roland $
 */

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>

#include <net/dst.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

static DEFINE_MUTEX(mcast_mutex);

/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
struct ipoib_mcast {
	struct ib_sa_mcmember_rec mcmember;
	struct ipoib_ah *ah;

	struct rb_node rb_node;
	struct list_head list;
	struct completion done;

	int query_id;
	struct ib_sa_query *query;

	unsigned long created;
	unsigned long backoff;

	unsigned long flags;
	unsigned char logcount;

	struct list_head neigh_list;

	struct sk_buff_head pkt_queue;

	struct net_device *dev;
};

struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid mgid;
	unsigned long created;
	unsigned int queuelen;
	unsigned int complete;
	unsigned int send_only;
};

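/*
 * Free a multicast group entry: drop the AH reference held by each
 * neighbour on the group's neigh_list, release the group's own AH,
 * and account any packets still waiting on pkt_queue as TX drops.
 */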
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tmp;
	unsigned long flags;
	int tx_dropped = 0;

	ipoib_dbg_mcast(netdev_priv(dev),
			"deleting multicast group " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that mcast->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		ipoib_neigh_free(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	while (!skb_queue_empty(&mcast->pkt_queue)) {
		++tx_dropped;
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
	}

	spin_lock_irqsave(&priv->tx_lock, flags);
	priv->stats.tx_dropped += tx_dropped;
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	kfree(mcast);
}

static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
					     int can_sleep)
{
	struct ipoib_mcast *mcast;

	mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!mcast)
		return NULL;

	mcast->dev = dev;
	mcast->created = jiffies;
	mcast->backoff = 1;

	INIT_LIST_HEAD(&mcast->list);
	INIT_LIST_HEAD(&mcast->neigh_list);
	skb_queue_head_init(&mcast->pkt_queue);

	return mcast;
}

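/*
 * Look up a group by MGID in the per-device red-black tree; callers
 * in this file take priv->lock around the lookup.
 */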
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->multicast_tree.rb_node;

	while (n) {
		struct ipoib_mcast *mcast;
		int ret;

		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		ret = memcmp(mgid, mcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return mcast;
	}

	return NULL;
}

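/*
 * Insert a group into the red-black tree, keyed by MGID. Returns
 * -EEXIST if a group with the same MGID is already present.
 */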
static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;

	while (*n) {
		struct ipoib_mcast *tmcast;
		int ret;

		pn = *n;
		tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);

		ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &priv->multicast_tree);

	return 0;
}

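/*
 * Second half of a join: record the returned member record, cache the
 * Q_Key if this is the broadcast group, attach the QP (unless the
 * group is send-only), build an address handle and push out any
 * packets that were queued while the join was in progress.
 */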
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah;
	int ret;

	mcast->mcmember = *mcmember;

	/* Set the cached Q_Key before we attach if it's the broadcast group */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group " IPOIB_GID_FMT
				   " already attached\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));

			return 0;
		}

		ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group "
				   IPOIB_GID_FMT "\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	{
		struct ib_ah_attr av = {
			.dlid = be16_to_cpu(mcast->mcmember.mlid),
			.port_num = priv->port,
			.sl = mcast->mcmember.sl,
			.ah_flags = IB_AH_GRH,
			.static_rate = mcast->mcmember.rate,
			.grh = {
				.flow_label = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit = mcast->mcmember.hop_limit,
				.sgid_index = 0,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};
		av.grh.dgid = mcast->mcmember.mgid;

		ah = ipoib_create_ah(dev, priv->pd, &av);
		if (!ah) {
			ipoib_warn(priv, "ib_address_create failed\n");
		} else {
			ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT
					" AV %p, LID 0x%04x, SL %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid),
					ah->ah,
					be16_to_cpu(mcast->mcmember.mlid),
					mcast->mcmember.sl);
		}

		spin_lock_irq(&priv->lock);
		mcast->ah = ah;
		spin_unlock_irq(&priv->lock);
	}

	/* actually send any queued packets */
	spin_lock_irq(&priv->tx_lock);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
		spin_unlock_irq(&priv->tx_lock);

		skb->dev = dev;

		if (!skb->dst || !skb->dst->neighbour) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof (struct ipoib_pseudoheader));
		}

		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
		spin_lock_irq(&priv->tx_lock);
	}
	spin_unlock_irq(&priv->tx_lock);

	return 0;
}

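/*
 * SA completion handler for a send-only join. On success the join is
 * finished as usual; on failure the queued packets are dropped and
 * the busy flag is cleared so a later transmit can retry.
 */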
static void
ipoib_mcast_sendonly_join_complete(int status,
				   struct ib_sa_mcmember_rec *mcmember,
				   void *mcast_ptr)
{
	struct ipoib_mcast *mcast = mcast_ptr;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!status)
		ipoib_mcast_join_finish(mcast, mcmember);
	else {
		if (mcast->logcount++ < 20)
			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for "
					IPOIB_GID_FMT ", status %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid), status);

		/* Flush out any queued packets */
		spin_lock_irq(&priv->tx_lock);
		while (!skb_queue_empty(&mcast->pkt_queue)) {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
		}
		spin_unlock_irq(&priv->tx_lock);

		/* Clear the busy flag so we try again */
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	}

	complete(&mcast->done);
}

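/*
 * Start a send-only join for a group we only transmit to. This runs
 * under tx_lock from the send path, hence the GFP_ATOMIC allocation
 * in the SA query.
 */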
static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
#if 0	/* Some SMs don't support send-only yet */
		.join_state = 4
#else
		.join_state = 1
#endif
	};
	int ret = 0;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
		return -ENODEV;
	}

	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
		return -EBUSY;
	}

	rec.mgid = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey = cpu_to_be16(priv->pkey);

	init_completion(&mcast->done);

	ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
				     IB_SA_MCMEMBER_REC_MGID |
				     IB_SA_MCMEMBER_REC_PORT_GID |
				     IB_SA_MCMEMBER_REC_PKEY |
				     IB_SA_MCMEMBER_REC_JOIN_STATE,
				     1000, GFP_ATOMIC,
				     ipoib_mcast_sendonly_join_complete,
				     mcast, &mcast->query);
	if (ret < 0) {
		ipoib_warn(priv, "ib_sa_mcmember_rec_set failed (ret = %d)\n",
			   ret);
	} else {
		ipoib_dbg_mcast(priv, "no multicast record for " IPOIB_GID_FMT
				", starting join\n",
				IPOIB_GID_ARG(mcast->mcmember.mgid));

		mcast->query_id = ret;
	}

	return ret;
}

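/*
 * SA completion handler for a full join. On success, reset the
 * backoff and kick the join task so the next group can be processed;
 * on failure, back off exponentially and reschedule the join task.
 */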
static void ipoib_mcast_join_complete(int status,
				      struct ib_sa_mcmember_rec *mcmember,
				      void *mcast_ptr)
{
	struct ipoib_mcast *mcast = mcast_ptr;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "join completion for " IPOIB_GID_FMT
			" (status %d)\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid), status);

	if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
		mcast->backoff = 1;
		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_work(ipoib_workqueue, &priv->mcast_task);
		mutex_unlock(&mcast_mutex);
		complete(&mcast->done);
		return;
	}

	if (status == -EINTR) {
		complete(&mcast->done);
		return;
	}

	if (status && mcast->logcount++ < 20) {
		if (status == -ETIMEDOUT || status == -EINTR) {
			ipoib_dbg_mcast(priv, "multicast join failed for " IPOIB_GID_FMT
					", status %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid),
					status);
		} else {
			ipoib_warn(priv, "multicast join failed for "
				   IPOIB_GID_FMT ", status %d\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid),
				   status);
		}
	}

	mcast->backoff *= 2;
	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

	mutex_lock(&mcast_mutex);

	spin_lock_irq(&priv->lock);
	mcast->query = NULL;

	if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
		if (status == -ETIMEDOUT)
			queue_work(ipoib_workqueue, &priv->mcast_task);
		else
			queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
					   mcast->backoff * HZ);
	} else
		complete(&mcast->done);
	spin_unlock_irq(&priv->lock);
	mutex_unlock(&mcast_mutex);

	return;
}

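/*
 * Issue an SA join for one group. When 'create' is set, the broadcast
 * group's Q_Key, SL, flow label and traffic class are included in the
 * request so the group can be created if it does not exist yet.
 */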
static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
			     int create)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	ib_sa_comp_mask comp_mask;
	int ret = 0;

	ipoib_dbg_mcast(priv, "joining MGID " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	rec.mgid = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey = cpu_to_be16(priv->pkey);

	comp_mask =
		IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID |
		IB_SA_MCMEMBER_REC_PKEY |
		IB_SA_MCMEMBER_REC_JOIN_STATE;

	if (create) {
		comp_mask |=
			IB_SA_MCMEMBER_REC_QKEY |
			IB_SA_MCMEMBER_REC_SL |
			IB_SA_MCMEMBER_REC_FLOW_LABEL |
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

		rec.qkey = priv->broadcast->mcmember.qkey;
		rec.sl = priv->broadcast->mcmember.sl;
		rec.flow_label = priv->broadcast->mcmember.flow_label;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
	}

	init_completion(&mcast->done);

	ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, comp_mask,
				     mcast->backoff * 1000, GFP_ATOMIC,
				     ipoib_mcast_join_complete,
				     mcast, &mcast->query);

	if (ret < 0) {
		ipoib_warn(priv, "ib_sa_mcmember_rec_set failed, status %d\n", ret);

		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task,
					   mcast->backoff * HZ);
		mutex_unlock(&mcast_mutex);
	} else
		mcast->query_id = ret;
}

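/*
 * Workqueue handler that drives all joins: refresh the local GID, LID
 * and rate, make sure the broadcast group exists and is joined, then
 * join the remaining groups one at a time. The join completion
 * handlers requeue this task until every group is attached.
 */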
void ipoib_mcast_join_task(void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_query_gid() failed\n");
	else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	{
		struct ib_port_attr attr;

		if (!ib_query_port(priv->ca, priv->port, &attr)) {
			priv->local_lid = attr.lid;
			priv->local_rate = attr.active_speed *
				ib_width_enum_to_int(attr.active_width);
		} else
			ipoib_warn(priv, "ib_query_port failed\n");
	}

	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;

		broadcast = ipoib_mcast_alloc(dev, 1);
		if (!broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			mutex_lock(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			mutex_unlock(&mcast_mutex);
			return;
		}

		spin_lock_irq(&priv->lock);
		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;

		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
		IPOIB_ENCAP_LEN;
	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	netif_carrier_on(dev);
}

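/*
 * Kick off multicast processing: schedule the join task and mark the
 * device started so ipoib_mcast_send() will accept packets.
 */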
int ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	mutex_lock(&mcast_mutex);
	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_work(ipoib_workqueue, &priv->mcast_task);
	mutex_unlock(&mcast_mutex);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_MCAST_STARTED, &priv->flags);
	spin_unlock_irq(&priv->lock);

	return 0;
}

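/*
 * Cancel any outstanding SA query for a group and wait for its
 * completion handler to finish.
 */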
static void wait_for_mcast_join(struct ipoib_dev_priv *priv,
				struct ipoib_mcast *mcast)
{
	spin_lock_irq(&priv->lock);
	if (mcast && mcast->query) {
		ib_sa_cancel_query(mcast->query_id, mcast->query);
		mcast->query = NULL;
		spin_unlock_irq(&priv->lock);
		ipoib_dbg_mcast(priv, "waiting for MGID " IPOIB_GID_FMT "\n",
				IPOIB_GID_ARG(mcast->mcmember.mgid));
		wait_for_completion(&mcast->done);
	} else
		spin_unlock_irq(&priv->lock);
}

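/*
 * Stop multicast processing: clear the run flag, cancel the join task,
 * optionally flush the workqueue, and wait for in-flight joins.
 */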
int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	spin_lock_irq(&priv->lock);
	clear_bit(IPOIB_MCAST_STARTED, &priv->flags);
	spin_unlock_irq(&priv->lock);

	mutex_lock(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	mutex_unlock(&mcast_mutex);

	if (flush)
		flush_workqueue(ipoib_workqueue);

	wait_for_mcast_join(priv, priv->broadcast);

	list_for_each_entry(mcast, &priv->multicast_list, list)
		wait_for_mcast_join(priv, mcast);

	return 0;
}

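/*
 * Leave a group we previously attached to: detach the QP and fire off
 * a best-effort SA delete without waiting for the reply.
 */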
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	int ret = 0;

	if (!test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags))
		return 0;

	ipoib_dbg_mcast(priv, "leaving MGID " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	rec.mgid = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey = cpu_to_be16(priv->pkey);

	/* Remove ourselves from the multicast group */
	ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
				 &mcast->mcmember.mgid);
	if (ret)
		ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", ret);

	/*
	 * Just make one shot at leaving and don't wait for a reply;
	 * if we fail, too bad.
	 */
	ret = ib_sa_mcmember_rec_delete(priv->ca, priv->port, &rec,
					IB_SA_MCMEMBER_REC_MGID |
					IB_SA_MCMEMBER_REC_PORT_GID |
					IB_SA_MCMEMBER_REC_PKEY |
					IB_SA_MCMEMBER_REC_JOIN_STATE,
					0, GFP_ATOMIC, NULL,
					mcast, &mcast->query);
	if (ret < 0)
		ipoib_warn(priv, "ib_sa_mcmember_rec_delete failed "
			   "for leave (result = %d)\n", ret);

	return 0;
}

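/*
 * Multicast transmit path, called from ipoib_start_xmit() under
 * tx_lock. Looks up the destination group (creating a send-only entry
 * if needed) and either sends immediately or queues the packet until
 * the join completes.
 */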
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) ||
	    !priv->broadcast ||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for "
				IPOIB_GID_FMT "\n", IPOIB_GID_RAW_ARG(mgid));

		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for "
				   "multicast structure\n");
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		if (mcast->query)
			ipoib_dbg_mcast(priv, "no address vector, "
					"but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If lookup completes between here and out:, don't
		 * want to send packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		if (skb->dst &&
		    skb->dst->neighbour &&
		    !*to_ipoib_neigh(skb->dst->neighbour)) {
			struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour);

			if (neigh) {
				kref_get(&mcast->ah->ref);
				neigh->ah = mcast->ah;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}

		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
	}

unlock:
	spin_unlock(&priv->lock);
}

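/*
 * Tear down every multicast group, including the broadcast group;
 * each group is left and freed outside of priv->lock.
 */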
void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}

	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}

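/*
 * Resynchronize our group list with the hardware multicast list kept
 * by the networking core: create entries for new addresses, remove
 * entries that have disappeared, then restart the join thread.
 */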
void ipoib_mcast_restart_task(void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct dev_mc_list *mclist;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	unsigned long flags;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	ipoib_mcast_stop_thread(dev, 0);

	local_irq_save(flags);
	netif_tx_lock(dev);
	spin_lock(&priv->lock);

	/*
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses. We need to figure out which ones
	 * are new and which ones have been removed
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
		union ib_gid mgid;

		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);

		/* Add in the P_Key */
		mgid.raw[4] = (priv->pkey >> 8) & 0xff;
		mgid.raw[5] = priv->pkey & 0xff;

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid "
					IPOIB_GID_FMT "\n", IPOIB_GID_ARG(mgid));

			nmcast = ipoib_mcast_alloc(dev, 0);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_move_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries that don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group " IPOIB_GID_FMT "\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid));

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_move_tail(&mcast->list, &remove_list);
		}
	}

	spin_unlock(&priv->lock);
	netif_tx_unlock(dev);
	local_irq_restore(flags);

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		wait_for_mcast_join(priv, mcast);
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}

	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_mcast_start_thread(dev);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

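/*
 * Iterator helpers for the multicast debug support, compiled only
 * when CONFIG_INFINIBAND_IPOIB_DEBUG is enabled; they walk the group
 * tree in MGID order so state can be dumped one entry at a time.
 */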
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
	struct ipoib_mcast_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->mgid.raw, 0, 16);

	if (ipoib_mcast_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_mcast *mcast;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->multicast_tree);

	while (n) {
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid = mcast->mcmember.mgid;
			iter->created = mcast->created;
			iter->queuelen = skb_queue_len(&mcast->pkt_queue);
			iter->complete = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

			ret = 0;

			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *mgid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
{
	*mgid = iter->mgid;
	*created = iter->created;
	*queuelen = iter->queuelen;
	*complete = iter->complete;
	*send_only = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */