]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | |
2a1d9b7f RD |
3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
4 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. | |
1da177e4 LT |
5 | * |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
1da177e4 LT |
33 | */ |
34 | ||
35 | #include <linux/skbuff.h> | |
36 | #include <linux/rtnetlink.h> | |
37 | #include <linux/ip.h> | |
38 | #include <linux/in.h> | |
39 | #include <linux/igmp.h> | |
40 | #include <linux/inetdevice.h> | |
41 | #include <linux/delay.h> | |
42 | #include <linux/completion.h> | |
5a0e3ad6 | 43 | #include <linux/slab.h> |
1da177e4 | 44 | |
14c85021 ACM |
45 | #include <net/dst.h> |
46 | ||
1da177e4 LT |
47 | #include "ipoib.h" |
48 | ||
49 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG | |
50 | static int mcast_debug_level; | |
51 | ||
52 | module_param(mcast_debug_level, int, 0644); | |
53 | MODULE_PARM_DESC(mcast_debug_level, | |
54 | "Enable multicast debug tracing if > 0"); | |
55 | #endif | |
56 | ||
95ed644f | 57 | static DEFINE_MUTEX(mcast_mutex); |
1da177e4 | 58 | |
/*
 * Cursor for walking the device's multicast groups one entry at a
 * time, carrying a snapshot of one group's state.
 * NOTE(review): consumers are not visible in this chunk — presumably
 * the debugfs mcast iterator; confirm against ipoib_fs.c.
 */
struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid       mgid;	/* group GID of the current entry */
	unsigned long      created;	/* jiffies timestamp of creation */
	unsigned int       queuelen;	/* packets queued awaiting join */
	unsigned int       complete;	/* join completed? */
	unsigned int       send_only;	/* send-only membership? */
};
67 | ||
/*
 * Tear down a multicast group entry: release the AH references held by
 * neighbours on this group's list, release the group's own AH, drop any
 * packets still queued waiting for the join (accounting them as TX
 * drops), then free the structure.
 */
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tmp;
	int tx_dropped = 0;

	ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %pI6\n",
			mcast->mcmember.mgid.raw);

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that mcast->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irq(&priv->lock);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	/* Packets queued while the join was pending are dropped here. */
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		++tx_dropped;
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
	}

	/* Batch the drop count into dev stats under the TX lock. */
	netif_tx_lock_bh(dev);
	dev->stats.tx_dropped += tx_dropped;
	netif_tx_unlock_bh(dev);

	kfree(mcast);
}
108 | ||
109 | static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev, | |
110 | int can_sleep) | |
111 | { | |
112 | struct ipoib_mcast *mcast; | |
113 | ||
de6eb66b | 114 | mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC); |
1da177e4 LT |
115 | if (!mcast) |
116 | return NULL; | |
117 | ||
1da177e4 LT |
118 | mcast->dev = dev; |
119 | mcast->created = jiffies; | |
ce5b65cc | 120 | mcast->backoff = 1; |
1da177e4 LT |
121 | |
122 | INIT_LIST_HEAD(&mcast->list); | |
123 | INIT_LIST_HEAD(&mcast->neigh_list); | |
124 | skb_queue_head_init(&mcast->pkt_queue); | |
125 | ||
1da177e4 LT |
126 | return mcast; |
127 | } | |
128 | ||
37c22a77 | 129 | static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) |
1da177e4 LT |
130 | { |
131 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
132 | struct rb_node *n = priv->multicast_tree.rb_node; | |
133 | ||
134 | while (n) { | |
135 | struct ipoib_mcast *mcast; | |
136 | int ret; | |
137 | ||
138 | mcast = rb_entry(n, struct ipoib_mcast, rb_node); | |
139 | ||
37c22a77 | 140 | ret = memcmp(mgid, mcast->mcmember.mgid.raw, |
1da177e4 LT |
141 | sizeof (union ib_gid)); |
142 | if (ret < 0) | |
143 | n = n->rb_left; | |
144 | else if (ret > 0) | |
145 | n = n->rb_right; | |
146 | else | |
147 | return mcast; | |
148 | } | |
149 | ||
150 | return NULL; | |
151 | } | |
152 | ||
153 | static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast) | |
154 | { | |
155 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
156 | struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL; | |
157 | ||
158 | while (*n) { | |
159 | struct ipoib_mcast *tmcast; | |
160 | int ret; | |
161 | ||
162 | pn = *n; | |
163 | tmcast = rb_entry(pn, struct ipoib_mcast, rb_node); | |
164 | ||
165 | ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw, | |
166 | sizeof (union ib_gid)); | |
167 | if (ret < 0) | |
168 | n = &pn->rb_left; | |
169 | else if (ret > 0) | |
170 | n = &pn->rb_right; | |
171 | else | |
172 | return -EEXIST; | |
173 | } | |
174 | ||
175 | rb_link_node(&mcast->rb_node, pn, n); | |
176 | rb_insert_color(&mcast->rb_node, &priv->multicast_tree); | |
177 | ||
178 | return 0; | |
179 | } | |
180 | ||
/*
 * Complete a successful SA join: store the returned mcmember record,
 * cache the Q_Key if this is the broadcast group, attach the QP (unless
 * the membership is send-only), build an address handle from the record,
 * and requeue any packets that arrived while the join was in flight.
 *
 * Returns 0 on success, -EAGAIN if the broadcast group disappeared
 * underneath us, or the ipoib_mcast_attach() error.
 */
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah;
	int ret;
	int set_qkey = 0;

	mcast->mcmember = *mcmember;

	/* Set the cached Q_Key before we attach if it's the broadcast group */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		spin_lock_irq(&priv->lock);
		if (!priv->broadcast) {
			/* Broadcast group was flushed while we joined; retry. */
			spin_unlock_irq(&priv->lock);
			return -EAGAIN;
		}
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		spin_unlock_irq(&priv->lock);
		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
		set_qkey = 1;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		/* ATTACHED also serves as an "attach once" guard. */
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group %pI6 already attached\n",
				   mcast->mcmember.mgid.raw);

			return 0;
		}

		ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid, set_qkey);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
				   mcast->mcmember.mgid.raw);

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	{
		/* Build the address vector from the SA mcmember record. */
		struct ib_ah_attr av = {
			.dlid = be16_to_cpu(mcast->mcmember.mlid),
			.port_num = priv->port,
			.sl = mcast->mcmember.sl,
			.ah_flags = IB_AH_GRH,
			.static_rate = mcast->mcmember.rate,
			.grh = {
				.flow_label = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit = mcast->mcmember.hop_limit,
				.sgid_index = 0,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};
		av.grh.dgid = mcast->mcmember.mgid;

		ah = ipoib_create_ah(dev, priv->pd, &av);
		if (!ah) {
			ipoib_warn(priv, "ib_address_create failed\n");
		} else {
			/* Publish the AH under the lock so senders see it atomically. */
			spin_lock_irq(&priv->lock);
			mcast->ah = ah;
			spin_unlock_irq(&priv->lock);

			ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
					mcast->mcmember.mgid.raw,
					mcast->ah->ah,
					be16_to_cpu(mcast->mcmember.mlid),
					mcast->mcmember.sl);
		}
	}

	/* actually send any queued packets */
	netif_tx_lock_bh(dev);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
		/* TX lock is dropped around dev_queue_xmit() and retaken. */
		netif_tx_unlock_bh(dev);

		skb->dev = dev;

		if (!skb_dst(skb) || !skb_dst(skb)->neighbour) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof (struct ipoib_pseudoheader));
		}

		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
		netif_tx_lock_bh(dev);
	}
	netif_tx_unlock_bh(dev);

	return 0;
}
278 | ||
/*
 * SA completion callback for a send-only join.  On success, finish the
 * join (build the AH, flush queued packets).  On failure, drop the
 * queued packets and clear the BUSY flag so a later send retries.
 * A -ENETRESET status is ignored because port events are handled
 * elsewhere in the driver.
 */
static int
ipoib_mcast_sendonly_join_complete(int status,
				   struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (status) {
		/* Rate-limit the complaint to the first 20 failures. */
		if (mcast->logcount++ < 20)
			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);

		/* Flush out any queued packets */
		netif_tx_lock_bh(dev);
		while (!skb_queue_empty(&mcast->pkt_queue)) {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
		}
		netif_tx_unlock_bh(dev);

		/* Clear the busy flag so we try again */
		status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
					    &mcast->flags);
	}
	return status;
}
312 | ||
/*
 * Start an SA join for a send-only multicast group.
 *
 * Returns -ENODEV if the interface is going down, -EBUSY if a join is
 * already in flight (BUSY flag set), or the ib_sa_join_multicast()
 * error code; 0 means the asynchronous join was started and will be
 * completed by ipoib_mcast_sendonly_join_complete().
 */
static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
#if 0				/* Some SMs don't support send-only yet */
		.join_state = 4
#else
		.join_state = 1
#endif
	};
	int ret = 0;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
		return -ENODEV;
	}

	/* BUSY doubles as the "join in progress" latch. */
	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
		return -EBUSY;
	}

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
					 priv->port, &rec,
					 IB_SA_MCMEMBER_REC_MGID	|
					 IB_SA_MCMEMBER_REC_PORT_GID	|
					 IB_SA_MCMEMBER_REC_PKEY	|
					 IB_SA_MCMEMBER_REC_JOIN_STATE,
					 GFP_ATOMIC,
					 ipoib_mcast_sendonly_join_complete,
					 mcast);
	if (IS_ERR(mcast->mc)) {
		ret = PTR_ERR(mcast->mc);
		/* Undo the latch so the next send retries the join. */
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
			   ret);
	} else {
		ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
				mcast->mcmember.mgid.raw);
	}

	return ret;
}
361 | ||
/*
 * Deferred work to turn the net carrier on once the broadcast group
 * join has completed.  Runs from a workqueue so taking rtnl_lock here
 * cannot deadlock with the SA join completion context that queued it.
 * Keeps the carrier off while the IB port is not ACTIVE.
 */
void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   carrier_on_task);
	struct ib_port_attr attr;

	/*
	 * Take rtnl_lock to avoid racing with ipoib_stop() and
	 * turning the carrier back on while a device is being
	 * removed.
	 */
	if (ib_query_port(priv->ca, priv->port, &attr) ||
	    attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
		return;
	}

	rtnl_lock();
	netif_carrier_on(priv->dev);
	rtnl_unlock();
}
383 | ||
/*
 * SA completion callback for a normal (non-send-only) join.  On
 * success: reset the backoff, requeue the join task to handle the next
 * unjoined group, and — for the broadcast group — defer carrier-on to
 * the workqueue.  On failure: log (rate-limited), double the backoff,
 * clear BUSY, and reschedule a retry after backoff seconds.
 */
static int ipoib_mcast_join_complete(int status,
				     struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
			mcast->mcmember.mgid.raw, status);

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (!status) {
		mcast->backoff = 1;
		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task, 0);
		mutex_unlock(&mcast_mutex);

		/*
		 * Defer carrier on work to ipoib_workqueue to avoid a
		 * deadlock on rtnl_lock here.
		 */
		if (mcast == priv->broadcast)
			queue_work(ipoib_workqueue, &priv->carrier_on_task);

		return 0;
	}

	/* Rate-limit failure messages to the first 20 occurrences;
	 * timeouts/EAGAIN are expected transients and only get a dbg. */
	if (mcast->logcount++ < 20) {
		if (status == -ETIMEDOUT || status == -EAGAIN) {
			ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);
		} else {
			ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
				   mcast->mcmember.mgid.raw, status);
		}
	}

	/* Exponential backoff, capped at IPOIB_MAX_BACKOFF_SECONDS. */
	mcast->backoff *= 2;
	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

	/* Clear the busy flag so we try again */
	status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);

	mutex_lock(&mcast_mutex);
	spin_lock_irq(&priv->lock);
	if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
				   mcast->backoff * HZ);
	spin_unlock_irq(&priv->lock);
	mutex_unlock(&mcast_mutex);

	return status;
}
446 | ||
/*
 * Start an asynchronous SA join for @mcast.  When @create is set the
 * request additionally carries Q_Key/MTU/rate/SL/etc. cloned from the
 * already-joined broadcast group, allowing the SM to create the group
 * if it does not exist.  Completion runs ipoib_mcast_join_complete();
 * an immediate failure backs off and reschedules the join task.
 * NOTE(review): the @create path dereferences priv->broadcast without
 * a NULL check — callers appear to guarantee the broadcast group is
 * joined first (see ipoib_mcast_join_task); confirm.
 */
static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
			     int create)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	ib_sa_comp_mask comp_mask;
	int ret = 0;

	ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	comp_mask =
		IB_SA_MCMEMBER_REC_MGID		|
		IB_SA_MCMEMBER_REC_PORT_GID	|
		IB_SA_MCMEMBER_REC_PKEY		|
		IB_SA_MCMEMBER_REC_JOIN_STATE;

	if (create) {
		comp_mask |=
			IB_SA_MCMEMBER_REC_QKEY			|
			IB_SA_MCMEMBER_REC_MTU_SELECTOR		|
			IB_SA_MCMEMBER_REC_MTU			|
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS	|
			IB_SA_MCMEMBER_REC_RATE_SELECTOR	|
			IB_SA_MCMEMBER_REC_RATE			|
			IB_SA_MCMEMBER_REC_SL			|
			IB_SA_MCMEMBER_REC_FLOW_LABEL		|
			IB_SA_MCMEMBER_REC_HOP_LIMIT;

		/* Clone the broadcast group's parameters for group creation. */
		rec.qkey	  = priv->broadcast->mcmember.qkey;
		rec.mtu_selector  = IB_SA_EQ;
		rec.mtu		  = priv->broadcast->mcmember.mtu;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
		rec.rate_selector = IB_SA_EQ;
		rec.rate	  = priv->broadcast->mcmember.rate;
		rec.sl		  = priv->broadcast->mcmember.sl;
		rec.flow_label	  = priv->broadcast->mcmember.flow_label;
		rec.hop_limit	  = priv->broadcast->mcmember.hop_limit;
	}

	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_KERNEL,
					 ipoib_mcast_join_complete, mcast);
	if (IS_ERR(mcast->mc)) {
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ret = PTR_ERR(mcast->mc);
		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);

		/* Exponential backoff before the join task retries. */
		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task,
					   mcast->backoff * HZ);
		mutex_unlock(&mcast_mutex);
	}
}
513 | ||
/*
 * Delayed work driving the multicast join state machine.  Each pass:
 * refresh the cached local GID and LID, allocate/join the broadcast
 * group first, then pick the next unjoined non-send-only group and
 * start its join (the completion callback requeues this task).  Once
 * everything is joined, derive the multicast MTU from the broadcast
 * group and clear IPOIB_MCAST_RUN.
 */
void ipoib_mcast_join_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, mcast_task.work);
	struct net_device *dev = priv->dev;

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	/* Refresh local GID and mirror it into the device HW address. */
	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_query_gid() failed\n");
	else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	{
		struct ib_port_attr attr;

		if (!ib_query_port(priv->ca, priv->port, &attr))
			priv->local_lid = attr.lid;
		else
			ipoib_warn(priv, "ib_query_port failed\n");
	}

	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;

		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			return;

		broadcast = ipoib_mcast_alloc(dev, 1);
		if (!broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			/* Retry in one second. */
			mutex_lock(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			mutex_unlock(&mcast_mutex);
			return;
		}

		/* Publish priv->broadcast under the lock. */
		spin_lock_irq(&priv->lock);
		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;

		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	/* The broadcast group must be joined before anything else. */
	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
			ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		/* One join per pass; completion requeues us. */
		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));

	if (!ipoib_cm_admin_enabled(dev)) {
		rtnl_lock();
		dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
		rtnl_unlock();
	}

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
}
604 | ||
/*
 * Start the multicast join state machine: set IPOIB_MCAST_RUN and
 * queue the join task.  Idempotent — does nothing if the flag was
 * already set.  Always returns 0.
 */
int ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	mutex_lock(&mcast_mutex);
	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
	mutex_unlock(&mcast_mutex);

	return 0;
}
618 | ||
/*
 * Stop the multicast join state machine: clear IPOIB_MCAST_RUN and
 * cancel the pending join task.  If @flush is set, additionally wait
 * for any already-running work on ipoib_workqueue to complete.
 * Always returns 0.
 */
int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	mutex_lock(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	mutex_unlock(&mcast_mutex);

	if (flush)
		flush_workqueue(ipoib_workqueue);

	return 0;
}
635 | ||
/*
 * Leave a multicast group: release the SA multicast handle if a join
 * was pending or completed (BUSY set), and detach the QP if it was
 * attached.
 * NOTE(review): a detach failure is logged via @ret but the function
 * always returns 0; the caller visible here (ipoib_mcast_dev_flush)
 * ignores the return value.
 */
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		ib_sa_free_multicast(mcast->mc);

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
				mcast->mcmember.mgid.raw);

		/* Remove ourselves from the multicast group */
		ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid,
				      be16_to_cpu(mcast->mcmember.mlid));
		if (ret)
			ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
	}

	return 0;
}
657 | ||
/*
 * Multicast transmit path.  Looks up (or creates a send-only entry
 * for) the group with the given MGID.  If the group's address handle
 * is not ready yet, the skb is queued (up to IPOIB_MAX_MCAST_QUEUE)
 * and a send-only join is started; otherwise the skb is sent
 * immediately, caching the AH in the skb's neighbour on first use.
 * priv->lock is dropped before calling ipoib_send().
 */
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Drop everything until the broadcast group has been joined. */
	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
	    !priv->broadcast					||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
				mgid);

		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for "
				   "multicast structure\n");
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		/* AH not ready: queue (bounded) or drop. */
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			ipoib_dbg_mcast(priv, "no address vector, "
					"but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If lookup completes between here and out:, don't
		 * want to send packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		/* Cache the AH in the neighbour on first use. */
		if (skb_dst(skb)		&&
		    skb_dst(skb)->neighbour &&
		    !*to_ipoib_neigh(skb_dst(skb)->neighbour)) {
			struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour,
								      skb->dev);

			if (neigh) {
				kref_get(&mcast->ah->ref);
				neigh->ah	= mcast->ah;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}

		/* Drop the lock before the actual send. */
		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
		return;
	}

unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}
739 | ||
/*
 * Remove every multicast group, including the broadcast group, from
 * the device.  Two phases: unlink everything from the list and rb-tree
 * under priv->lock, then leave and free each group with the lock
 * dropped.
 */
void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}

	/* The broadcast group is not on multicast_list; handle it here. */
	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Leave and free each group outside the lock. */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}
770 | ||
6c74651c JP |
771 | static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen, |
772 | const u8 *broadcast) | |
5e47596b | 773 | { |
6c74651c JP |
774 | if (addrlen != INFINIBAND_ALEN) |
775 | return 0; | |
5e47596b JG |
776 | /* reserved QPN, prefix, scope */ |
777 | if (memcmp(addr, broadcast, 6)) | |
778 | return 0; | |
779 | /* signature lower, pkey */ | |
780 | if (memcmp(addr + 7, broadcast + 7, 3)) | |
781 | return 0; | |
782 | return 1; | |
783 | } | |
784 | ||
c4028958 | 785 | void ipoib_mcast_restart_task(struct work_struct *work) |
1da177e4 | 786 | { |
c4028958 DH |
787 | struct ipoib_dev_priv *priv = |
788 | container_of(work, struct ipoib_dev_priv, restart_task); | |
789 | struct net_device *dev = priv->dev; | |
1da177e4 LT |
790 | struct dev_mc_list *mclist; |
791 | struct ipoib_mcast *mcast, *tmcast; | |
792 | LIST_HEAD(remove_list); | |
793 | unsigned long flags; | |
335a64a5 | 794 | struct ib_sa_mcmember_rec rec; |
1da177e4 LT |
795 | |
796 | ipoib_dbg_mcast(priv, "restarting multicast task\n"); | |
797 | ||
8d2cae06 | 798 | ipoib_mcast_stop_thread(dev, 0); |
1da177e4 | 799 | |
932ff279 | 800 | local_irq_save(flags); |
e308a5d8 | 801 | netif_addr_lock(dev); |
78bfe0b5 | 802 | spin_lock(&priv->lock); |
1da177e4 LT |
803 | |
804 | /* | |
805 | * Unfortunately, the networking core only gives us a list of all of | |
806 | * the multicast hardware addresses. We need to figure out which ones | |
807 | * are new and which ones have been removed | |
808 | */ | |
809 | ||
810 | /* Clear out the found flag */ | |
811 | list_for_each_entry(mcast, &priv->multicast_list, list) | |
812 | clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); | |
813 | ||
814 | /* Mark all of the entries that are found or don't exist */ | |
fbf219f1 | 815 | netdev_for_each_mc_addr(mclist, dev) { |
1da177e4 LT |
816 | union ib_gid mgid; |
817 | ||
5e47596b | 818 | if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr, |
6c74651c | 819 | mclist->dmi_addrlen, |
5e47596b JG |
820 | dev->broadcast)) |
821 | continue; | |
822 | ||
1da177e4 LT |
823 | memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid); |
824 | ||
1da177e4 LT |
825 | mcast = __ipoib_mcast_find(dev, &mgid); |
826 | if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { | |
827 | struct ipoib_mcast *nmcast; | |
828 | ||
335a64a5 OG |
829 | /* ignore group which is directly joined by userspace */ |
830 | if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) && | |
831 | !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) { | |
5b095d98 | 832 | ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n", |
fcace2fe | 833 | mgid.raw); |
335a64a5 OG |
834 | continue; |
835 | } | |
836 | ||
1da177e4 | 837 | /* Not found or send-only group, let's add a new entry */ |
5b095d98 | 838 | ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n", |
fcace2fe | 839 | mgid.raw); |
1da177e4 LT |
840 | |
841 | nmcast = ipoib_mcast_alloc(dev, 0); | |
842 | if (!nmcast) { | |
843 | ipoib_warn(priv, "unable to allocate memory for multicast structure\n"); | |
844 | continue; | |
845 | } | |
846 | ||
847 | set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags); | |
848 | ||
849 | nmcast->mcmember.mgid = mgid; | |
850 | ||
851 | if (mcast) { | |
852 | /* Destroy the send only entry */ | |
179e0917 | 853 | list_move_tail(&mcast->list, &remove_list); |
1da177e4 LT |
854 | |
855 | rb_replace_node(&mcast->rb_node, | |
856 | &nmcast->rb_node, | |
857 | &priv->multicast_tree); | |
858 | } else | |
859 | __ipoib_mcast_add(dev, nmcast); | |
860 | ||
861 | list_add_tail(&nmcast->list, &priv->multicast_list); | |
862 | } | |
863 | ||
864 | if (mcast) | |
865 | set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); | |
866 | } | |
867 | ||
868 | /* Remove all of the entries don't exist anymore */ | |
869 | list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) { | |
870 | if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) && | |
871 | !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { | |
5b095d98 | 872 | ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n", |
fcace2fe | 873 | mcast->mcmember.mgid.raw); |
1da177e4 LT |
874 | |
875 | rb_erase(&mcast->rb_node, &priv->multicast_tree); | |
876 | ||
877 | /* Move to the remove list */ | |
179e0917 | 878 | list_move_tail(&mcast->list, &remove_list); |
1da177e4 LT |
879 | } |
880 | } | |
78bfe0b5 MT |
881 | |
882 | spin_unlock(&priv->lock); | |
e308a5d8 | 883 | netif_addr_unlock(dev); |
932ff279 | 884 | local_irq_restore(flags); |
1da177e4 LT |
885 | |
886 | /* We have to cancel outside of the spinlock */ | |
887 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { | |
888 | ipoib_mcast_leave(mcast->dev, mcast); | |
889 | ipoib_mcast_free(mcast); | |
890 | } | |
891 | ||
892 | if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) | |
893 | ipoib_mcast_start_thread(dev); | |
894 | } | |
895 | ||
8ae5a8a2 RD |
896 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG |
897 | ||
1da177e4 LT |
898 | struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev) |
899 | { | |
900 | struct ipoib_mcast_iter *iter; | |
901 | ||
902 | iter = kmalloc(sizeof *iter, GFP_KERNEL); | |
903 | if (!iter) | |
904 | return NULL; | |
905 | ||
906 | iter->dev = dev; | |
1732b0ef | 907 | memset(iter->mgid.raw, 0, 16); |
1da177e4 LT |
908 | |
909 | if (ipoib_mcast_iter_next(iter)) { | |
1732b0ef | 910 | kfree(iter); |
1da177e4 LT |
911 | return NULL; |
912 | } | |
913 | ||
914 | return iter; | |
915 | } | |
916 | ||
1da177e4 LT |
917 | int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter) |
918 | { | |
919 | struct ipoib_dev_priv *priv = netdev_priv(iter->dev); | |
920 | struct rb_node *n; | |
921 | struct ipoib_mcast *mcast; | |
922 | int ret = 1; | |
923 | ||
924 | spin_lock_irq(&priv->lock); | |
925 | ||
926 | n = rb_first(&priv->multicast_tree); | |
927 | ||
928 | while (n) { | |
929 | mcast = rb_entry(n, struct ipoib_mcast, rb_node); | |
930 | ||
931 | if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw, | |
932 | sizeof (union ib_gid)) < 0) { | |
933 | iter->mgid = mcast->mcmember.mgid; | |
934 | iter->created = mcast->created; | |
935 | iter->queuelen = skb_queue_len(&mcast->pkt_queue); | |
936 | iter->complete = !!mcast->ah; | |
937 | iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY)); | |
938 | ||
939 | ret = 0; | |
940 | ||
941 | break; | |
942 | } | |
943 | ||
944 | n = rb_next(n); | |
945 | } | |
946 | ||
947 | spin_unlock_irq(&priv->lock); | |
948 | ||
949 | return ret; | |
950 | } | |
951 | ||
952 | void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter, | |
953 | union ib_gid *mgid, | |
954 | unsigned long *created, | |
955 | unsigned int *queuelen, | |
956 | unsigned int *complete, | |
957 | unsigned int *send_only) | |
958 | { | |
959 | *mgid = iter->mgid; | |
960 | *created = iter->created; | |
961 | *queuelen = iter->queuelen; | |
962 | *complete = iter->complete; | |
963 | *send_only = iter->send_only; | |
964 | } | |
8ae5a8a2 RD |
965 | |
966 | #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */ |