drivers/infiniband/ulp/ipoib/ipoib_multicast.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/slab.h>

#include <net/dst.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

static DEFINE_MUTEX(mcast_mutex);

struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid mgid;
	unsigned long created;
	unsigned int queuelen;
	unsigned int complete;
	unsigned int send_only;
};

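/*
 * Free a multicast group entry: drop the AH reference held by each
 * neighbour on the group's neigh_list, release the group's own address
 * handle, and count any packets still queued on it as tx_dropped.
 */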
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tmp;
	int tx_dropped = 0;

	ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %pI6\n",
			mcast->mcmember.mgid.raw);

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that mcast->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irq(&priv->lock);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	while (!skb_queue_empty(&mcast->pkt_queue)) {
		++tx_dropped;
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
	}

	netif_tx_lock_bh(dev);
	dev->stats.tx_dropped += tx_dropped;
	netif_tx_unlock_bh(dev);

	kfree(mcast);
}

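/*
 * Allocate and initialize an ipoib_mcast entry; uses GFP_KERNEL or
 * GFP_ATOMIC depending on whether the caller can sleep.
 */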
static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
					     int can_sleep)
{
	struct ipoib_mcast *mcast;

	mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!mcast)
		return NULL;

	mcast->dev = dev;
	mcast->created = jiffies;
	mcast->backoff = 1;

	INIT_LIST_HEAD(&mcast->list);
	INIT_LIST_HEAD(&mcast->neigh_list);
	skb_queue_head_init(&mcast->pkt_queue);

	return mcast;
}

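/*
 * Look up a multicast group by MGID in the per-device red-black tree.
 * Callers serialize access to the tree with priv->lock.
 */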
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->multicast_tree.rb_node;

	while (n) {
		struct ipoib_mcast *mcast;
		int ret;

		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		ret = memcmp(mgid, mcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return mcast;
	}

	return NULL;
}

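/*
 * Insert a multicast group into the per-device red-black tree, keyed by
 * MGID.  Returns -EEXIST if a group with the same MGID is already present.
 */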
static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;

	while (*n) {
		struct ipoib_mcast *tmcast;
		int ret;

		pn = *n;
		tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);

		ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &priv->multicast_tree);

	return 0;
}

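/*
 * Complete a successful SA join: store the member record, cache the Q_Key
 * if this is the broadcast group, attach the QP to the group (unless it is
 * send-only), build the multicast address handle, and transmit any packets
 * that were queued while the join was in progress.
 */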
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah;
	int ret;
	int set_qkey = 0;

	mcast->mcmember = *mcmember;

	/* Set the cached Q_Key before we attach if it's the broadcast group */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		spin_lock_irq(&priv->lock);
		if (!priv->broadcast) {
			spin_unlock_irq(&priv->lock);
			return -EAGAIN;
		}
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		spin_unlock_irq(&priv->lock);
		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
		set_qkey = 1;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group %pI6 already attached\n",
				   mcast->mcmember.mgid.raw);

			return 0;
		}

		ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid, set_qkey);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
				   mcast->mcmember.mgid.raw);

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	{
		struct ib_ah_attr av = {
			.dlid = be16_to_cpu(mcast->mcmember.mlid),
			.port_num = priv->port,
			.sl = mcast->mcmember.sl,
			.ah_flags = IB_AH_GRH,
			.static_rate = mcast->mcmember.rate,
			.grh = {
				.flow_label = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit = mcast->mcmember.hop_limit,
				.sgid_index = 0,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};
		av.grh.dgid = mcast->mcmember.mgid;

		ah = ipoib_create_ah(dev, priv->pd, &av);
		if (!ah) {
			ipoib_warn(priv, "ib_address_create failed\n");
		} else {
			spin_lock_irq(&priv->lock);
			mcast->ah = ah;
			spin_unlock_irq(&priv->lock);

			ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
					mcast->mcmember.mgid.raw,
					mcast->ah->ah,
					be16_to_cpu(mcast->mcmember.mlid),
					mcast->mcmember.sl);
		}
	}

	/* actually send any queued packets */
	netif_tx_lock_bh(dev);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
		struct dst_entry *dst = skb_dst(skb);
		struct neighbour *n = NULL;

		netif_tx_unlock_bh(dev);

		skb->dev = dev;
		if (dst)
			n = dst_get_neighbour(dst);
		if (!dst || !n) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof (struct ipoib_pseudoheader));
		}

		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
		netif_tx_lock_bh(dev);
	}
	netif_tx_unlock_bh(dev);

	return 0;
}

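/*
 * Completion handler for a send-only SA join.  On success, finish the join;
 * on failure, drop any queued packets and clear the BUSY flag so a later
 * transmit can retry the join.  -ENETRESET is ignored because port events
 * are handled separately.
 */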
static int
ipoib_mcast_sendonly_join_complete(int status,
				   struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (status) {
		if (mcast->logcount++ < 20)
			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);

		/* Flush out any queued packets */
		netif_tx_lock_bh(dev);
		while (!skb_queue_empty(&mcast->pkt_queue)) {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
		}
		netif_tx_unlock_bh(dev);

		/* Clear the busy flag so we try again */
		status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
					    &mcast->flags);
	}
	return status;
}

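/*
 * Start an asynchronous send-only join for a group we only transmit to.
 * Returns -ENODEV if the device is going down and -EBUSY if a join is
 * already outstanding for this group.
 */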
static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
#if 0	/* Some SMs don't support send-only yet */
		.join_state = 4
#else
		.join_state = 1
#endif
	};
	int ret = 0;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
		return -ENODEV;
	}

	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
		return -EBUSY;
	}

	rec.mgid = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey = cpu_to_be16(priv->pkey);

	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
					 priv->port, &rec,
					 IB_SA_MCMEMBER_REC_MGID |
					 IB_SA_MCMEMBER_REC_PORT_GID |
					 IB_SA_MCMEMBER_REC_PKEY |
					 IB_SA_MCMEMBER_REC_JOIN_STATE,
					 GFP_ATOMIC,
					 ipoib_mcast_sendonly_join_complete,
					 mcast);
	if (IS_ERR(mcast->mc)) {
		ret = PTR_ERR(mcast->mc);
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
			   ret);
	} else {
		ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
				mcast->mcmember.mgid.raw);
	}

	return ret;
}

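/*
 * Work item that turns the net_device carrier on after the broadcast group
 * join, but only if the IB port is ACTIVE.  Takes rtnl_lock around
 * netif_carrier_on() to avoid racing with ipoib_stop() during device removal.
 */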
void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   carrier_on_task);
	struct ib_port_attr attr;

	/*
	 * Take rtnl_lock to avoid racing with ipoib_stop() and
	 * turning the carrier back on while a device is being
	 * removed.
	 */
	if (ib_query_port(priv->ca, priv->port, &attr) ||
	    attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
		return;
	}

	rtnl_lock();
	netif_carrier_on(priv->dev);
	rtnl_unlock();
}

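/*
 * Completion handler for a full (non-send-only) SA join.  On success, reset
 * the backoff, requeue the multicast task, and schedule the carrier-on work
 * if this was the broadcast group.  On failure, double the backoff (capped
 * at IPOIB_MAX_BACKOFF_SECONDS), clear the BUSY flag and reschedule.
 */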
static int ipoib_mcast_join_complete(int status,
				     struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
			mcast->mcmember.mgid.raw, status);

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (!status) {
		mcast->backoff = 1;
		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task, 0);
		mutex_unlock(&mcast_mutex);

		/*
		 * Defer carrier on work to ipoib_workqueue to avoid a
		 * deadlock on rtnl_lock here.
		 */
		if (mcast == priv->broadcast)
			queue_work(ipoib_workqueue, &priv->carrier_on_task);

		return 0;
	}

	if (mcast->logcount++ < 20) {
		if (status == -ETIMEDOUT || status == -EAGAIN) {
			ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);
		} else {
			ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
				   mcast->mcmember.mgid.raw, status);
		}
	}

	mcast->backoff *= 2;
	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

	/* Clear the busy flag so we try again */
	status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);

	mutex_lock(&mcast_mutex);
	spin_lock_irq(&priv->lock);
	if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
				   mcast->backoff * HZ);
	spin_unlock_irq(&priv->lock);
	mutex_unlock(&mcast_mutex);

	return status;
}

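/*
 * Kick off an asynchronous SA join for a group.  When "create" is set the
 * request also carries the Q_Key, MTU, rate and other parameters copied
 * from the broadcast group, so the SM may create the group if needed.
 * On submission failure the backoff is doubled and the task rescheduled.
 */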
static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
			     int create)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	ib_sa_comp_mask comp_mask;
	int ret = 0;

	ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);

	rec.mgid = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey = cpu_to_be16(priv->pkey);

	comp_mask =
		IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID |
		IB_SA_MCMEMBER_REC_PKEY |
		IB_SA_MCMEMBER_REC_JOIN_STATE;

	if (create) {
		comp_mask |=
			IB_SA_MCMEMBER_REC_QKEY |
			IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			IB_SA_MCMEMBER_REC_MTU |
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
			IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			IB_SA_MCMEMBER_REC_RATE |
			IB_SA_MCMEMBER_REC_SL |
			IB_SA_MCMEMBER_REC_FLOW_LABEL |
			IB_SA_MCMEMBER_REC_HOP_LIMIT;

		rec.qkey = priv->broadcast->mcmember.qkey;
		rec.mtu_selector = IB_SA_EQ;
		rec.mtu = priv->broadcast->mcmember.mtu;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
		rec.rate_selector = IB_SA_EQ;
		rec.rate = priv->broadcast->mcmember.rate;
		rec.sl = priv->broadcast->mcmember.sl;
		rec.flow_label = priv->broadcast->mcmember.flow_label;
		rec.hop_limit = priv->broadcast->mcmember.hop_limit;
	}

	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_KERNEL,
					 ipoib_mcast_join_complete, mcast);
	if (IS_ERR(mcast->mc)) {
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ret = PTR_ERR(mcast->mc);
		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);

		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task,
					   mcast->backoff * HZ);
		mutex_unlock(&mcast_mutex);
	}
}

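/*
 * Delayed work that drives the join state machine: refresh the local GID
 * and LID, make sure the broadcast group exists and is joined, then join
 * the remaining groups on multicast_list one at a time.  Once everything
 * is joined, update the multicast MTU and clear IPOIB_MCAST_RUN.
 */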
void ipoib_mcast_join_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, mcast_task.work);
	struct net_device *dev = priv->dev;

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_query_gid() failed\n");
	else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	{
		struct ib_port_attr attr;

		if (!ib_query_port(priv->ca, priv->port, &attr))
			priv->local_lid = attr.lid;
		else
			ipoib_warn(priv, "ib_query_port failed\n");
	}

	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;

		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			return;

		broadcast = ipoib_mcast_alloc(dev, 1);
		if (!broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			mutex_lock(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			mutex_unlock(&mcast_mutex);
			return;
		}

		spin_lock_irq(&priv->lock);
		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;

		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
			ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));

	if (!ipoib_cm_admin_enabled(dev)) {
		rtnl_lock();
		dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
		rtnl_unlock();
	}

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
}

int ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	mutex_lock(&mcast_mutex);
	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
	mutex_unlock(&mcast_mutex);

	return 0;
}

int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	mutex_lock(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	mutex_unlock(&mcast_mutex);

	if (flush)
		flush_workqueue(ipoib_workqueue);

	return 0;
}

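/*
 * Leave a multicast group: free the outstanding SA join (if any) and, if
 * the QP was attached, detach it from the group.
 */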
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		ib_sa_free_multicast(mcast->mc);

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
				mcast->mcmember.mgid.raw);

		/* Remove ourselves from the multicast group */
		ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid,
				      be16_to_cpu(mcast->mcmember.mlid));
		if (ret)
			ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
	}

	return 0;
}

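/*
 * Transmit path for multicast packets.  If no group exists yet for the
 * destination MGID, a send-only entry is created and its join is started;
 * until the address handle is ready, up to IPOIB_MAX_MCAST_QUEUE packets
 * are queued on the group (anything beyond that is dropped).  Once the AH
 * exists, the packet is handed to ipoib_send() on the multicast QPN.
 */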
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
	    !priv->broadcast ||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
				mgid);

		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for "
				   "multicast structure\n");
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			ipoib_dbg_mcast(priv, "no address vector, "
					"but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If lookup completes between here and out:, don't
		 * want to send packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		struct dst_entry *dst = skb_dst(skb);
		struct neighbour *n = NULL;
		if (dst)
			n = dst_get_neighbour(dst);
		if (n && !*to_ipoib_neigh(n)) {
			struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
								      skb->dev);

			if (neigh) {
				kref_get(&mcast->ah->ref);
				neigh->ah = mcast->ah;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
		return;
	}

unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

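/*
 * Tear down every multicast group on the device, including the broadcast
 * group: unlink them all under the lock, then leave and free each one
 * outside of it.
 */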
void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}

	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}

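/*
 * Check that a hardware multicast address is usable for IPoIB by comparing
 * the reserved QPN/prefix/scope bytes and the signature/P_Key bytes against
 * the device broadcast address.
 */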
static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
{
	/* reserved QPN, prefix, scope */
	if (memcmp(addr, broadcast, 6))
		return 0;
	/* signature lower, pkey */
	if (memcmp(addr + 7, broadcast + 7, 3))
		return 0;
	return 1;
}

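/*
 * Work item run when the net_device multicast list changes: reconcile the
 * addresses reported by the network stack with our multicast_list, adding
 * new groups, promoting send-only entries to full ones, and removing
 * groups that are no longer subscribed, then restart the join thread.
 */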
void ipoib_mcast_restart_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, restart_task);
	struct net_device *dev = priv->dev;
	struct netdev_hw_addr *ha;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	unsigned long flags;
	struct ib_sa_mcmember_rec rec;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	ipoib_mcast_stop_thread(dev, 0);

	local_irq_save(flags);
	netif_addr_lock(dev);
	spin_lock(&priv->lock);

	/*
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses. We need to figure out which ones
	 * are new and which ones have been removed
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	netdev_for_each_mc_addr(ha, dev) {
		union ib_gid mgid;

		if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
			continue;

		memcpy(mgid.raw, ha->addr + 4, sizeof mgid);

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* ignore group which is directly joined by userspace */
			if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
			    !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
				ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
						mgid.raw);
				continue;
			}

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
					mgid.raw);

			nmcast = ipoib_mcast_alloc(dev, 0);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_move_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries that don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
					mcast->mcmember.mgid.raw);

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_move_tail(&mcast->list, &remove_list);
		}
	}

	spin_unlock(&priv->lock);
	netif_addr_unlock(dev);
	local_irq_restore(flags);

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}

	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_mcast_start_thread(dev);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

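/*
 * Allocate an iterator over the multicast group tree, positioned on the
 * first group; only built when the debug support above is enabled.
 */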
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
	struct ipoib_mcast_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->mgid.raw, 0, 16);

	if (ipoib_mcast_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

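/*
 * Advance the iterator to the first group whose MGID sorts after the one
 * currently recorded in the iterator, copying out its state.  Returns
 * non-zero when there are no more groups.
 */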
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_mcast *mcast;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->multicast_tree);

	while (n) {
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid = mcast->mcmember.mgid;
			iter->created = mcast->created;
			iter->queuelen = skb_queue_len(&mcast->pkt_queue);
			iter->complete = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

			ret = 0;

			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *mgid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
{
	*mgid = iter->mgid;
	*created = iter->created;
	*queuelen = iter->queuelen;
	*complete = iter->complete;
	*send_only = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */