drivers/infiniband/ulp/ipoib/ipoib_multicast.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/slab.h>

#include <net/dst.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
        "Enable multicast debug tracing if > 0");
#endif

static DEFINE_MUTEX(mcast_mutex);

struct ipoib_mcast_iter {
        struct net_device *dev;
        union ib_gid mgid;
        unsigned long created;
        unsigned int queuelen;
        unsigned int complete;
        unsigned int send_only;
};

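/*
 * Tear down a multicast group entry: drop the per-neighbour address handle
 * references, release the group's own address handle, and drop (and count
 * as tx_dropped) any packets still queued waiting for the join to finish.
 */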
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
        struct net_device *dev = mcast->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh, *tmp;
        int tx_dropped = 0;

        ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %pI6\n",
                        mcast->mcmember.mgid.raw);

        spin_lock_irq(&priv->lock);

        list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
                /*
                 * It's safe to call ipoib_put_ah() inside priv->lock
                 * here, because we know that mcast->ah will always
                 * hold one more reference, so ipoib_put_ah() will
                 * never do more than decrement the ref count.
                 */
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);
                ipoib_neigh_free(dev, neigh);
        }

        spin_unlock_irq(&priv->lock);

        if (mcast->ah)
                ipoib_put_ah(mcast->ah);

        while (!skb_queue_empty(&mcast->pkt_queue)) {
                ++tx_dropped;
                dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
        }

        netif_tx_lock_bh(dev);
        dev->stats.tx_dropped += tx_dropped;
        netif_tx_unlock_bh(dev);

        kfree(mcast);
}

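/*
 * Allocate and minimally initialize a new group entry.  can_sleep selects
 * GFP_KERNEL vs. GFP_ATOMIC so this can also be called from the xmit path.
 */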
static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
                                             int can_sleep)
{
        struct ipoib_mcast *mcast;

        mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
        if (!mcast)
                return NULL;

        mcast->dev = dev;
        mcast->created = jiffies;
        mcast->backoff = 1;

        INIT_LIST_HEAD(&mcast->list);
        INIT_LIST_HEAD(&mcast->neigh_list);
        skb_queue_head_init(&mcast->pkt_queue);

        return mcast;
}

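/*
 * Look up a group by MGID in the per-device red-black tree.
 * The caller must hold priv->lock.
 */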
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->multicast_tree.rb_node;

        while (n) {
                struct ipoib_mcast *mcast;
                int ret;

                mcast = rb_entry(n, struct ipoib_mcast, rb_node);

                ret = memcmp(mgid, mcast->mcmember.mgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        return mcast;
        }

        return NULL;
}

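/*
 * Insert a group into the red-black tree, keyed by MGID.  Returns -EEXIST
 * if the group is already present.  The caller must hold priv->lock.
 */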
static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;

        while (*n) {
                struct ipoib_mcast *tmcast;
                int ret;

                pn = *n;
                tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);

                ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = &pn->rb_left;
                else if (ret > 0)
                        n = &pn->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&mcast->rb_node, pn, n);
        rb_insert_color(&mcast->rb_node, &priv->multicast_tree);

        return 0;
}

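/*
 * Complete a join once the SA has returned the member record: cache the
 * Q_Key if this is the broadcast group, attach our QP to the group (unless
 * it is send-only), build an address handle from the returned path
 * parameters, and finally transmit any packets that were queued while the
 * join was in progress.
 */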
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
                                   struct ib_sa_mcmember_rec *mcmember)
{
        struct net_device *dev = mcast->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah;
        int ret;
        int set_qkey = 0;

        mcast->mcmember = *mcmember;

        /* Set the cached Q_Key before we attach if it's the broadcast group */
        if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
                    sizeof (union ib_gid))) {
                spin_lock_irq(&priv->lock);
                if (!priv->broadcast) {
                        spin_unlock_irq(&priv->lock);
                        return -EAGAIN;
                }
                priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
                spin_unlock_irq(&priv->lock);
                priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
                set_qkey = 1;
        }

        if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
                if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
                        ipoib_warn(priv, "multicast group %pI6 already attached\n",
                                   mcast->mcmember.mgid.raw);

                        return 0;
                }

                ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
                                         &mcast->mcmember.mgid, set_qkey);
                if (ret < 0) {
                        ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
                                   mcast->mcmember.mgid.raw);

                        clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
                        return ret;
                }
        }

        {
                struct ib_ah_attr av = {
                        .dlid = be16_to_cpu(mcast->mcmember.mlid),
                        .port_num = priv->port,
                        .sl = mcast->mcmember.sl,
                        .ah_flags = IB_AH_GRH,
                        .static_rate = mcast->mcmember.rate,
                        .grh = {
                                .flow_label = be32_to_cpu(mcast->mcmember.flow_label),
                                .hop_limit = mcast->mcmember.hop_limit,
                                .sgid_index = 0,
                                .traffic_class = mcast->mcmember.traffic_class
                        }
                };
                av.grh.dgid = mcast->mcmember.mgid;

                ah = ipoib_create_ah(dev, priv->pd, &av);
                if (IS_ERR(ah)) {
                        ipoib_warn(priv, "ib_address_create failed %ld\n",
                                   -PTR_ERR(ah));
                        /* use original error */
                        return PTR_ERR(ah);
                } else {
                        spin_lock_irq(&priv->lock);
                        mcast->ah = ah;
                        spin_unlock_irq(&priv->lock);

                        ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
                                        mcast->mcmember.mgid.raw,
                                        mcast->ah->ah,
                                        be16_to_cpu(mcast->mcmember.mlid),
                                        mcast->mcmember.sl);
                }
        }

        /* actually send any queued packets */
        netif_tx_lock_bh(dev);
        while (!skb_queue_empty(&mcast->pkt_queue)) {
                struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
                struct dst_entry *dst = skb_dst(skb);
                struct neighbour *n = NULL;

                netif_tx_unlock_bh(dev);

                skb->dev = dev;
                if (dst)
                        n = dst_get_neighbour_raw(dst);
                if (!dst || !n) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof (struct ipoib_pseudoheader));
                }

                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
                netif_tx_lock_bh(dev);
        }
        netif_tx_unlock_bh(dev);

        return 0;
}

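/*
 * Completion callback for a send-only join.  Runs in ib_sa callback
 * context; on failure the queued packets are dropped and the busy flag is
 * cleared so a later transmit can retry the join.
 */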
static int
ipoib_mcast_sendonly_join_complete(int status,
                                   struct ib_sa_multicast *multicast)
{
        struct ipoib_mcast *mcast = multicast->context;
        struct net_device *dev = mcast->dev;

        /* We trap for port events ourselves. */
        if (status == -ENETRESET)
                return 0;

        if (!status)
                status = ipoib_mcast_join_finish(mcast, &multicast->rec);

        if (status) {
                if (mcast->logcount++ < 20)
                        ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
                                        mcast->mcmember.mgid.raw, status);

                /* Flush out any queued packets */
                netif_tx_lock_bh(dev);
                while (!skb_queue_empty(&mcast->pkt_queue)) {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
                }
                netif_tx_unlock_bh(dev);

                /* Clear the busy flag so we try again */
                status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
                                            &mcast->flags);
        }
        return status;
}

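/*
 * Start a join for a send-only group (a group we only transmit to).
 * This is reached from the transmit path, so the SA request is issued
 * with GFP_ATOMIC.
 */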
static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
        struct net_device *dev = mcast->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_sa_mcmember_rec rec = {
#if 0   /* Some SMs don't support send-only yet */
                .join_state = 4
#else
                .join_state = 1
#endif
        };
        int ret = 0;

        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
                ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
                return -ENODEV;
        }

        if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
                ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
                return -EBUSY;
        }

        rec.mgid = mcast->mcmember.mgid;
        rec.port_gid = priv->local_gid;
        rec.pkey = cpu_to_be16(priv->pkey);

        mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
                                         priv->port, &rec,
                                         IB_SA_MCMEMBER_REC_MGID |
                                         IB_SA_MCMEMBER_REC_PORT_GID |
                                         IB_SA_MCMEMBER_REC_PKEY |
                                         IB_SA_MCMEMBER_REC_JOIN_STATE,
                                         GFP_ATOMIC,
                                         ipoib_mcast_sendonly_join_complete,
                                         mcast);
        if (IS_ERR(mcast->mc)) {
                ret = PTR_ERR(mcast->mc);
                clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
                ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
                           ret);
        } else {
                ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
                                mcast->mcmember.mgid.raw);
        }

        return ret;
}

void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   carrier_on_task);
        struct ib_port_attr attr;

        /*
         * Take rtnl_lock to avoid racing with ipoib_stop() and
         * turning the carrier back on while a device is being
         * removed.
         */
        if (ib_query_port(priv->ca, priv->port, &attr) ||
            attr.state != IB_PORT_ACTIVE) {
                ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
                return;
        }

        rtnl_lock();
        netif_carrier_on(priv->dev);
        rtnl_unlock();
}

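/*
 * Completion callback for a full-member join.  On success, reset the
 * backoff, requeue the join task to pick up the next group, and (for the
 * broadcast group) defer carrier-on to the workqueue.  On failure, back
 * off exponentially and reschedule the join task.
 */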
static int ipoib_mcast_join_complete(int status,
                                     struct ib_sa_multicast *multicast)
{
        struct ipoib_mcast *mcast = multicast->context;
        struct net_device *dev = mcast->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
                        mcast->mcmember.mgid.raw, status);

        /* We trap for port events ourselves. */
        if (status == -ENETRESET)
                return 0;

        if (!status)
                status = ipoib_mcast_join_finish(mcast, &multicast->rec);

        if (!status) {
                mcast->backoff = 1;
                mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
                        queue_delayed_work(ipoib_workqueue,
                                           &priv->mcast_task, 0);
                mutex_unlock(&mcast_mutex);

                /*
                 * Defer carrier on work to ipoib_workqueue to avoid a
                 * deadlock on rtnl_lock here.
                 */
                if (mcast == priv->broadcast)
                        queue_work(ipoib_workqueue, &priv->carrier_on_task);

                return 0;
        }

        if (mcast->logcount++ < 20) {
                if (status == -ETIMEDOUT || status == -EAGAIN) {
                        ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
                                        mcast->mcmember.mgid.raw, status);
                } else {
                        ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
                                   mcast->mcmember.mgid.raw, status);
                }
        }

        mcast->backoff *= 2;
        if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
                mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

        /* Clear the busy flag so we try again */
        status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);

        mutex_lock(&mcast_mutex);
        spin_lock_irq(&priv->lock);
        if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
                queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
                                   mcast->backoff * HZ);
        spin_unlock_irq(&priv->lock);
        mutex_unlock(&mcast_mutex);

        return status;
}

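/*
 * Issue an SA join for one group.  When create is set, the broadcast
 * group's Q_Key, MTU, traffic class, rate, SL, flow label and hop limit
 * are supplied in the request so the SM can create the group with
 * matching parameters.
 */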
static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
                             int create)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_sa_mcmember_rec rec = {
                .join_state = 1
        };
        ib_sa_comp_mask comp_mask;
        int ret = 0;

        ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);

        rec.mgid = mcast->mcmember.mgid;
        rec.port_gid = priv->local_gid;
        rec.pkey = cpu_to_be16(priv->pkey);

        comp_mask =
                IB_SA_MCMEMBER_REC_MGID |
                IB_SA_MCMEMBER_REC_PORT_GID |
                IB_SA_MCMEMBER_REC_PKEY |
                IB_SA_MCMEMBER_REC_JOIN_STATE;

        if (create) {
                comp_mask |=
                        IB_SA_MCMEMBER_REC_QKEY |
                        IB_SA_MCMEMBER_REC_MTU_SELECTOR |
                        IB_SA_MCMEMBER_REC_MTU |
                        IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
                        IB_SA_MCMEMBER_REC_RATE_SELECTOR |
                        IB_SA_MCMEMBER_REC_RATE |
                        IB_SA_MCMEMBER_REC_SL |
                        IB_SA_MCMEMBER_REC_FLOW_LABEL |
                        IB_SA_MCMEMBER_REC_HOP_LIMIT;

                rec.qkey = priv->broadcast->mcmember.qkey;
                rec.mtu_selector = IB_SA_EQ;
                rec.mtu = priv->broadcast->mcmember.mtu;
                rec.traffic_class = priv->broadcast->mcmember.traffic_class;
                rec.rate_selector = IB_SA_EQ;
                rec.rate = priv->broadcast->mcmember.rate;
                rec.sl = priv->broadcast->mcmember.sl;
                rec.flow_label = priv->broadcast->mcmember.flow_label;
                rec.hop_limit = priv->broadcast->mcmember.hop_limit;
        }

        set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
        mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
                                         &rec, comp_mask, GFP_KERNEL,
                                         ipoib_mcast_join_complete, mcast);
        if (IS_ERR(mcast->mc)) {
                clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
                ret = PTR_ERR(mcast->mc);
                ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);

                mcast->backoff *= 2;
                if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
                        mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

                mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
                        queue_delayed_work(ipoib_workqueue,
                                           &priv->mcast_task,
                                           mcast->backoff * HZ);
                mutex_unlock(&mcast_mutex);
        }
}

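/*
 * Delayed-work handler that drives the join state machine: refresh the
 * local GID and LID, join the broadcast group first, then walk
 * priv->multicast_list joining one unattached group per invocation (the
 * completion handler requeues this task) until all groups are joined.
 */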
void ipoib_mcast_join_task(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, mcast_task.work);
        struct net_device *dev = priv->dev;

        if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
                return;

        if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
                ipoib_warn(priv, "ib_query_gid() failed\n");
        else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

        {
                struct ib_port_attr attr;

                if (!ib_query_port(priv->ca, priv->port, &attr))
                        priv->local_lid = attr.lid;
                else
                        ipoib_warn(priv, "ib_query_port failed\n");
        }

        if (!priv->broadcast) {
                struct ipoib_mcast *broadcast;

                if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                        return;

                broadcast = ipoib_mcast_alloc(dev, 1);
                if (!broadcast) {
                        ipoib_warn(priv, "failed to allocate broadcast group\n");
                        mutex_lock(&mcast_mutex);
                        if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
                                queue_delayed_work(ipoib_workqueue,
                                                   &priv->mcast_task, HZ);
                        mutex_unlock(&mcast_mutex);
                        return;
                }

                spin_lock_irq(&priv->lock);
                memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
                       sizeof (union ib_gid));
                priv->broadcast = broadcast;

                __ipoib_mcast_add(dev, priv->broadcast);
                spin_unlock_irq(&priv->lock);
        }

        if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
                if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
                        ipoib_mcast_join(dev, priv->broadcast, 0);
                return;
        }

        while (1) {
                struct ipoib_mcast *mcast = NULL;

                spin_lock_irq(&priv->lock);
                list_for_each_entry(mcast, &priv->multicast_list, list) {
                        if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
                            && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
                            && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
                                /* Found the next unjoined group */
                                break;
                        }
                }
                spin_unlock_irq(&priv->lock);

                if (&mcast->list == &priv->multicast_list) {
                        /* All done */
                        break;
                }

                ipoib_mcast_join(dev, mcast, 1);
                return;
        }

        priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));

        if (!ipoib_cm_admin_enabled(dev)) {
                rtnl_lock();
                dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
                rtnl_unlock();
        }

        ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

        clear_bit(IPOIB_MCAST_RUN, &priv->flags);
}

int ipoib_mcast_start_thread(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg_mcast(priv, "starting multicast thread\n");

        mutex_lock(&mcast_mutex);
        if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
                queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
        mutex_unlock(&mcast_mutex);

        return 0;
}

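/*
 * Stop the join machinery.  When flush is set, also wait for any work
 * already queued on ipoib_workqueue to finish before returning.
 */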
int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg_mcast(priv, "stopping multicast thread\n");

        mutex_lock(&mcast_mutex);
        clear_bit(IPOIB_MCAST_RUN, &priv->flags);
        cancel_delayed_work(&priv->mcast_task);
        mutex_unlock(&mcast_mutex);

        if (flush)
                flush_workqueue(ipoib_workqueue);

        return 0;
}

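/*
 * Leave a group: release our SA membership (cancelling the join if it is
 * still in progress) and, if the QP was attached, detach it from the group.
 */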
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret = 0;

        if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
                ib_sa_free_multicast(mcast->mc);

        if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
                ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
                                mcast->mcmember.mgid.raw);

                /* Remove ourselves from the multicast group */
                ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid,
                                      be16_to_cpu(mcast->mcmember.mlid));
                if (ret)
                        ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
        }

        return 0;
}

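/*
 * Transmit path for multicast: look up (or create a send-only entry for)
 * the destination MGID.  If the group has no address handle yet, the skb
 * is queued and a join is started; otherwise it is sent immediately.
 */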
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_mcast *mcast;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
            !priv->broadcast ||
            !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
                goto unlock;
        }

        mcast = __ipoib_mcast_find(dev, mgid);
        if (!mcast) {
                /* Let's create a new send only group now */
                ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
                                mgid);

                mcast = ipoib_mcast_alloc(dev, 0);
                if (!mcast) {
                        ipoib_warn(priv, "unable to allocate memory for "
                                   "multicast structure\n");
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                        goto out;
                }

                set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
                memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
                __ipoib_mcast_add(dev, mcast);
                list_add_tail(&mcast->list, &priv->multicast_list);
        }

        if (!mcast->ah) {
                if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
                        skb_queue_tail(&mcast->pkt_queue, skb);
                else {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }

                if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
                        ipoib_dbg_mcast(priv, "no address vector, "
                                        "but multicast join already started\n");
                else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
                        ipoib_mcast_sendonly_join(mcast);

                /*
                 * If lookup completes between here and out:, don't
                 * want to send packet twice.
                 */
                mcast = NULL;
        }

out:
        if (mcast && mcast->ah) {
                struct dst_entry *dst = skb_dst(skb);
                struct neighbour *n = NULL;

                rcu_read_lock();
                if (dst)
                        n = dst_get_neighbour(dst);
                if (n && !*to_ipoib_neigh(n)) {
                        struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
                                                                      skb->dev);

                        if (neigh) {
                                kref_get(&mcast->ah->ref);
                                neigh->ah = mcast->ah;
                                list_add_tail(&neigh->list, &mcast->neigh_list);
                        }
                }
                rcu_read_unlock();
                spin_unlock_irqrestore(&priv->lock, flags);
                ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
                return;
        }

unlock:
        spin_unlock_irqrestore(&priv->lock, flags);
}

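/*
 * Remove every group (including the broadcast group) from the device,
 * leaving and freeing each one outside of priv->lock.
 */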
void ipoib_mcast_dev_flush(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        LIST_HEAD(remove_list);
        struct ipoib_mcast *mcast, *tmcast;
        unsigned long flags;

        ipoib_dbg_mcast(priv, "flushing multicast list\n");

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
                list_del(&mcast->list);
                rb_erase(&mcast->rb_node, &priv->multicast_tree);
                list_add_tail(&mcast->list, &remove_list);
        }

        if (priv->broadcast) {
                rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
                list_add_tail(&priv->broadcast->list, &remove_list);
                priv->broadcast = NULL;
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
                ipoib_mcast_leave(dev, mcast);
                ipoib_mcast_free(mcast);
        }
}

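/*
 * Check that a hardware multicast address shares the reserved QPN,
 * prefix, scope, signature and P_Key bytes of the device's broadcast
 * address; anything else is not a valid IPoIB multicast mapping.
 */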
static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
{
        /* reserved QPN, prefix, scope */
        if (memcmp(addr, broadcast, 6))
                return 0;
        /* signature lower, pkey */
        if (memcmp(addr + 7, broadcast + 7, 3))
                return 0;
        return 1;
}

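/*
 * Resync the kernel's multicast address list with our group list: add
 * entries for newly requested addresses, remove groups that are no
 * longer subscribed, then restart the join task if the interface is up.
 */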
void ipoib_mcast_restart_task(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, restart_task);
        struct net_device *dev = priv->dev;
        struct netdev_hw_addr *ha;
        struct ipoib_mcast *mcast, *tmcast;
        LIST_HEAD(remove_list);
        unsigned long flags;
        struct ib_sa_mcmember_rec rec;

        ipoib_dbg_mcast(priv, "restarting multicast task\n");

        ipoib_mcast_stop_thread(dev, 0);

        local_irq_save(flags);
        netif_addr_lock(dev);
        spin_lock(&priv->lock);

        /*
         * Unfortunately, the networking core only gives us a list of all of
         * the multicast hardware addresses. We need to figure out which ones
         * are new and which ones have been removed
         */

        /* Clear out the found flag */
        list_for_each_entry(mcast, &priv->multicast_list, list)
                clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

        /* Mark all of the entries that are found or don't exist */
        netdev_for_each_mc_addr(ha, dev) {
                union ib_gid mgid;

                if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
                        continue;

                memcpy(mgid.raw, ha->addr + 4, sizeof mgid);

                mcast = __ipoib_mcast_find(dev, &mgid);
                if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
                        struct ipoib_mcast *nmcast;

                        /* ignore group which is directly joined by userspace */
                        if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
                            !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
                                ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
                                                mgid.raw);
                                continue;
                        }

                        /* Not found or send-only group, let's add a new entry */
                        ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
                                        mgid.raw);

                        nmcast = ipoib_mcast_alloc(dev, 0);
                        if (!nmcast) {
                                ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
                                continue;
                        }

                        set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

                        nmcast->mcmember.mgid = mgid;

                        if (mcast) {
                                /* Destroy the send only entry */
                                list_move_tail(&mcast->list, &remove_list);

                                rb_replace_node(&mcast->rb_node,
                                                &nmcast->rb_node,
                                                &priv->multicast_tree);
                        } else
                                __ipoib_mcast_add(dev, nmcast);

                        list_add_tail(&nmcast->list, &priv->multicast_list);
                }

                if (mcast)
                        set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
        }

        /* Remove all of the entries that don't exist anymore */
        list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
                if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
                    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
                        ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
                                        mcast->mcmember.mgid.raw);

                        rb_erase(&mcast->rb_node, &priv->multicast_tree);

                        /* Move to the remove list */
                        list_move_tail(&mcast->list, &remove_list);
                }
        }

        spin_unlock(&priv->lock);
        netif_addr_unlock(dev);
        local_irq_restore(flags);

        /* We have to cancel outside of the spinlock */
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
                ipoib_mcast_leave(mcast->dev, mcast);
                ipoib_mcast_free(mcast);
        }

        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                ipoib_mcast_start_thread(dev);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

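/*
 * Iterator helpers, built only with CONFIG_INFINIBAND_IPOIB_DEBUG, for
 * walking the group list.  ipoib_mcast_iter_init() allocates an iterator
 * positioned at the first group in MGID order, or returns NULL if there
 * is nothing to show (or the allocation fails).
 */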
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
        struct ipoib_mcast_iter *iter;

        iter = kmalloc(sizeof *iter, GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        memset(iter->mgid.raw, 0, 16);

        if (ipoib_mcast_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

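/*
 * Advance the iterator to the next group whose MGID sorts after the one
 * cached in the iterator, copying a snapshot of its state.  Returns
 * nonzero when there are no more groups.
 */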
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
        struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
        struct rb_node *n;
        struct ipoib_mcast *mcast;
        int ret = 1;

        spin_lock_irq(&priv->lock);

        n = rb_first(&priv->multicast_tree);

        while (n) {
                mcast = rb_entry(n, struct ipoib_mcast, rb_node);

                if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
                           sizeof (union ib_gid)) < 0) {
                        iter->mgid = mcast->mcmember.mgid;
                        iter->created = mcast->created;
                        iter->queuelen = skb_queue_len(&mcast->pkt_queue);
                        iter->complete = !!mcast->ah;
                        iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

                        ret = 0;

                        break;
                }

                n = rb_next(n);
        }

        spin_unlock_irq(&priv->lock);

        return ret;
}

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
                           union ib_gid *mgid,
                           unsigned long *created,
                           unsigned int *queuelen,
                           unsigned int *complete,
                           unsigned int *send_only)
{
        *mgid = iter->mgid;
        *created = iter->created;
        *queuelen = iter->queuelen;
        *complete = iter->complete;
        *send_only = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */