/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include "sa.h"
#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
#define IB_SA_CPI_MAX_RETRY_CNT			3
#define IB_SA_CPI_RETRY_WAIT			1000 /* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah        *ah;
	struct kref          ref;
	u16		     pkey_index;
	u8		     src_path_mask;
};

enum rdma_class_port_info_type {
	RDMA_CLASS_PORT_INFO_IB,
	RDMA_CLASS_PORT_INFO_OPA
};

struct rdma_class_port_info {
	enum rdma_class_port_info_type type;
	union {
		struct ib_class_port_info ib;
		struct opa_class_port_info opa;
	};
};

struct ib_sa_classport_cache {
	bool valid;
	int retry_cnt;
	struct rdma_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	struct ib_sa_classport_cache classport_info;
	struct delayed_work ib_cpi_work;
	spinlock_t                   classport_lock; /* protects class port info set */
	spinlock_t           ah_lock;
	u8                   port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list; /* Local svc request list */
	u32			seq; /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
#define IB_SA_QUERY_OPA			0x00000004

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};


static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits  = 0,
	  .size_bits    = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits  = 0,
	  .size_bits    = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits  = 0,
	  .size_bits    = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits  = 0,
	  .size_bits    = 2*64 },
};

#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
	.struct_size_bytes   = sizeof((struct ib_class_port_info *)0)->field,	\
	.field_name          = "ib_class_port_info:" #field

static const struct ib_field ib_classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 7,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 15,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
};

#define OPA_CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes =\
		offsetof(struct opa_class_port_info, field),	\
	.struct_size_bytes = \
		sizeof((struct opa_class_port_info *)0)->field,	\
	.field_name          = "opa_class_port_info:" #field

static const struct ib_field opa_classport_info_rec_table[] = {
	{ OPA_CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 18,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 18,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
	  .offset_words = 19,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 19,
	  .offset_bits  = 8,
	  .size_bits    = 24 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 512 },
};

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = (struct rdma_ls_resolve_header *)
		skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}

static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	/* Put the request on the list first. */
	spin_lock_irqsave(&ib_nl_request_lock, flags);
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	ret = ib_nl_send_msg(query, gfp_mask);
	if (ret <= 0) {
		ret = -EIO;
		/* Remove the request */
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		list_del(&query->list);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	} else {
		ret = 0;
	}

	return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data  *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy, NULL);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy, NULL);
	if (ret)
		return 0;

	return 1;
}

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	rdma_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
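
/*
 * Usage sketch (illustrative, not part of the original file): an SA
 * consumer brackets its lifetime with the two calls above.  All names
 * below are hypothetical.
 *
 *	static struct ib_sa_client my_sa_client;
 *
 *	my_init() { ib_sa_register_client(&my_sa_client); }
 *	my_exit() { ib_sa_unregister_client(&my_sa_client); }
 *
 * ib_sa_unregister_client() drops the initial reference and then blocks
 * on the completion until every outstanding query has dropped its
 * ib_sa_client_get() reference.
 */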

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port   *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec,
			 struct rdma_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int use_roce;
	struct net_device *ndev = NULL;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	use_roce = rdma_cap_eth_ah(device, port_num);

	if (use_roce) {
		struct net_device *idev;
		struct net_device *resolved_dev;
		struct rdma_dev_addr dev_addr = {.bound_dev_if = rec->ifindex,
						 .net = rec->net ? rec->net :
							&init_net};
		union {
			struct sockaddr     _sockaddr;
			struct sockaddr_in  _sockaddr_in;
			struct sockaddr_in6 _sockaddr_in6;
		} sgid_addr, dgid_addr;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
		rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);

		/* validate the route */
		ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
					    &dgid_addr._sockaddr, &dev_addr);
		if (ret)
			return ret;

		if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
		     dev_addr.network == RDMA_NETWORK_IPV6) &&
		    rec->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
			return -EINVAL;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		resolved_dev = dev_get_by_index(dev_addr.net,
						dev_addr.bound_dev_if);
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		ndev = ib_get_ndev_from_path(rec);
		rcu_read_lock();
		if ((ndev && ndev != resolved_dev) ||
		    (resolved_dev != idev &&
		     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}
	}

	if (rec->hop_limit > 0 || use_roce) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid_by_port(device, &rec->sgid,
						 rec->gid_type, port_num, ndev,
						 &gid_index);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}

		ah_attr->grh.sgid_index    = gid_index;
		ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit     = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
		if (ndev)
			dev_put(ndev);
	}

	if (use_roce)
		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);
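
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that has resolved a path record typically converts it into address
 * handle attributes and then creates the AH.  "pd", "device", "port"
 * and "rec" are assumed to exist in the caller.
 *
 *	struct rdma_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *
 *	if (!ib_init_ah_from_path(device, port, rec, &ah_attr))
 *		ah = rdma_create_ah(pd, &ah_attr);
 *
 * Note that on RoCE ports the routine also resolves and validates the
 * IP route between sgid and dgid, which is why it can fail with
 * -EHOSTUNREACH even for a well-formed path record.
 */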

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    ((query->flags & IB_SA_QUERY_OPA) ?
					     OPA_MGMT_BASE_VERSION :
					     IB_MGMT_BASE_VERSION));
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
	struct ib_sa_mad *mad = query->mad_buf->mad;
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	if (query->flags & IB_SA_QUERY_OPA) {
		mad->mad_hdr.base_version  = OPA_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
	} else {
		mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
	}
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid           =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms  = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);
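
/*
 * Sketch of the pack/unpack round trip (illustrative, not part of the
 * original file): path_rec_table maps each struct ib_sa_path_rec field
 * to its wire offset, so a record can be serialized into the MAD
 * attribute area and recovered again.  "rec" is assumed to exist in
 * the caller.
 *
 *	u8 buf[IB_MGMT_SA_DATA];	// wire image, zero-padded
 *	struct ib_sa_path_rec out;
 *
 *	ib_sa_pack_path(rec, buf);	// host struct -> big-endian wire
 *	ib_sa_unpack_path(buf, &out);	// wire -> host struct
 *
 * RESERVED entries in the table consume wire bits without touching the
 * host structure, which is how the packed layout stays in sync with
 * the SA attribute format.
 */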

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		rec.net = NULL;
		rec.ifindex = 0;
		rec.gid_type = IB_GID_TYPE_IB;
		eth_zero_addr(rec.dmac);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
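
/*
 * Usage sketch (illustrative, not part of the original file): a minimal
 * caller, with all names below hypothetical.  The id returned on
 * success can be handed to ib_sa_cancel_query() together with the
 * sa_query pointer.
 *
 *	static void my_path_cb(int status, struct ib_sa_path_rec *resp,
 *			       void *context)
 *	{
 *		if (!status)
 *			;	// use *resp, e.g. build an AH from it
 *	}
 *
 *	struct ib_sa_query *sa_query;
 *	int id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				    IB_SA_PATH_REC_DGID |
 *				    IB_SA_PATH_REC_SGID |
 *				    IB_SA_PATH_REC_NUMB_PATH,
 *				    1000, GFP_KERNEL,
 *				    my_path_cb, NULL, &sa_query);
 *	if (id < 0)
 *		;	// error, callback will not run
 */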

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
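
/*
 * Usage sketch (illustrative, not part of the original file):
 * registering a service record is an IB_MGMT_METHOD_SET, deregistering
 * it is an IB_SA_METHOD_DELETE with the same record; only those two
 * methods and IB_MGMT_METHOD_GET are accepted, anything else fails
 * with -EINVAL before a MAD is allocated.  Names below are
 * hypothetical.
 *
 *	ret = ib_sa_service_rec_query(&my_sa_client, device, port_num,
 *				      IB_MGMT_METHOD_SET, &svc_rec,
 *				      comp_mask, 1000, GFP_KERNEL,
 *				      my_service_cb, NULL, &sa_query);
 */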

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
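
/*
 * Illustrative note (not part of the original file): like the service
 * record query above, the method must be GET, SET or DELETE.  A SET
 * whose comp_mask covers lid, block_num and guid_info_list is, for
 * example, how mlx4 SR-IOV registers alias GUID blocks with the SM
 * (see drivers/infiniband/hw/mlx4/alias_GUID.c).
 */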

bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
				    struct ib_device *device,
				    u8 port_num)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	bool ret = false;
	unsigned long flags;

	if (!sa_dev)
		return ret;

	port  = &sa_dev->port[port_num - sa_dev->start_port];

	spin_lock_irqsave(&port->classport_lock, flags);
	if ((port->classport_info.valid) &&
	    (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
		ret = ib_get_cpi_capmask2(&port->classport_info.data.ib)
			& IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);
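
/*
 * Illustrative note (not part of the original file): this helper only
 * consults the cached IB-type ClassPortInfo, so it can legitimately
 * return false while the cache is still being populated by
 * update_ib_cpi() below.  A consumer such as ipoib would check it once
 * per multicast join, e.g.:
 *
 *	if (ib_sa_sendonly_fullmem_support(&my_sa_client, dev, port))
 *		;	// join the group as a send-only full member
 */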
1776
1777struct ib_classport_info_context {
1778 struct completion done;
1779 struct ib_sa_query *sa_query;
1780};
1781
1782static void ib_classportinfo_cb(void *context)
1783{
1784 struct ib_classport_info_context *cb_ctx = context;
1785
1786 complete(&cb_ctx->done);
1787}
1788
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status,
					      struct ib_sa_mad *mad)
{
	unsigned long flags;
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;

	if (mad) {
		if (sa_query->flags & IB_SA_QUERY_OPA) {
			struct opa_class_port_info rec;

			ib_unpack(opa_classport_info_rec_table,
				  ARRAY_SIZE(opa_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.opa, &rec,
				       sizeof(info->data.opa));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);

		} else {
			struct ib_class_port_info rec;

			ib_unpack(ib_classport_info_rec_table,
				  ARRAY_SIZE(ib_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.ib, &rec,
				       sizeof(info->data.ib));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_IB;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);
		}
	}
	query->callback(query->context);
}

static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}

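/*
 * Build and send a SubnAdmGet(ClassPortInfo) MAD on @port.  The query
 * is flagged IB_SA_QUERY_OPA when the device uses OPA address handles,
 * so the response handler above knows which record layout to unpack.
 * @callback runs from the MAD completion path once a reply (or a
 * timeout) arrives.
 */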
static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
					  int timeout_ms,
					  void (*callback)(void *context),
					  void *context,
					  struct ib_sa_query **sa_query)
{
	struct ib_mad_agent *agent;
	struct ib_sa_classport_info_query *query;
	struct ib_sa_mad *mad;
	gfp_t gfp_mask = GFP_KERNEL;
	int ret;

	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
						 port->port_num) ?
				 IB_SA_QUERY_OPA : 0;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err_free;

	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = ib_sa_classport_info_rec_callback;
	query->sa_query.release = ib_sa_classport_info_rec_release;
	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err_free_mad;

	return ret;

err_free_mad:
	*sa_query = NULL;
	free_mad(&query->sa_query);

err_free:
	kfree(query);
	return ret;
}

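/*
 * Delayed-work handler that (re)populates the cached ClassPortInfo.
 * Issues a synchronous query with a 3 second timeout; if the cache is
 * still invalid afterwards, the work re-queues itself after
 * IB_SA_CPI_RETRY_WAIT ms, up to IB_SA_CPI_MAX_RETRY_CNT attempts.
 */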
static void update_ib_cpi(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, ib_cpi_work.work);
	struct ib_classport_info_context *cb_context;
	unsigned long flags;
	int ret;

	/* If the classport info is already valid, there is nothing
	 * to do here.
	 */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid) {
		spin_unlock_irqrestore(&port->classport_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
	if (!cb_context)
		goto err_nomem;

	init_completion(&cb_context->done);

	ret = ib_sa_classport_info_rec_query(port, 3000,
					     ib_classportinfo_cb, cb_context,
					     &cb_context->sa_query);
	if (ret < 0)
		goto free_cb_err;
	wait_for_completion(&cb_context->done);
free_cb_err:
	kfree(cb_context);
	spin_lock_irqsave(&port->classport_lock, flags);

	/* If the classport info is still not valid, the query must have
	 * failed for some reason.  Retry issuing the query.
	 */
	if (!port->classport_info.valid) {
		port->classport_info.retry_cnt++;
		if (port->classport_info.retry_cnt <=
		    IB_SA_CPI_MAX_RETRY_CNT) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
		}
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

err_nomem:
	return;
}

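/*
 * MAD agent send-completion handler.  On anything other than success
 * the user callback gets an errno mapped from the work completion
 * status (-ETIMEDOUT, -EINTR or -EIO); on success the receive handler
 * has already delivered the response.  Either way the query is torn
 * down: its IDR id, MAD buffer and client reference are released.
 */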
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	if (query->client)
		ib_sa_client_put(query->client);
	query->release(query);
}

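/*
 * MAD agent receive handler.  Recovers the originating query from the
 * send buffer's context pointer and passes the response MAD to the
 * query's callback: 0 on success, -EINVAL when the SA returned a
 * non-zero MAD status, -EIO when the work completion itself failed.
 */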
static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

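/*
 * Work handler that rebuilds the cached address handle to the subnet
 * manager.  Queries the current SM LID/SL, resolves the default
 * full-membership PKey index, adds a GRH with the well-known GUID when
 * the port requires one, and swaps the new AH in under ah_lock,
 * dropping the reference on the old one.
 */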
static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct rdma_ah_attr ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.dlid = port_attr.sm_lid;
	ah_attr.sl = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;
	if (port_attr.grh_required) {
		ah_attr.ah_flags = IB_AH_GRH;
		ah_attr.grh.dgid.global.subnet_prefix =
			cpu_to_be64(port_attr.subnet_prefix);
		ah_attr.grh.dgid.global.interface_id =
			cpu_to_be64(IB_SA_WELL_KNOWN_GUID);
	}

	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

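/*
 * Asynchronous event handler.  Any event that can change how the SM is
 * reached drops the cached SM AH and queues update_sm_ah() to rebuild
 * it.  Events that may indicate a new SM or LID additionally
 * invalidate the cached ClassPortInfo and schedule update_ib_cpi() to
 * refresh it.
 */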
static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		u8 port_num = event->element.port_num - sa_dev->start_port;
		struct ib_sa_port *port = &sa_dev->port[port_num];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE ||
		    event->event == IB_EVENT_PORT_ACTIVE) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			port->classport_info.retry_cnt = 0;
			spin_unlock_irqrestore(&port->classport_lock, flags);
			queue_delayed_work(ib_wq,
					   &port->ib_cpi_work, delay);
		}
		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
	}
}

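/*
 * Client callback invoked for each new IB device.  Allocates one
 * ib_sa_port per port, registers a GSI MAD agent on every port that
 * supports SA queries, then registers the event handler before priming
 * the SM AH cache so a port state change during setup cannot be
 * missed.  Devices with no SA-capable ports are skipped entirely.
 */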
static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
				  update_ib_cpi);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}

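/*
 * Client callback invoked when a device goes away.  Unregisters the
 * event handler first and flushes ib_wq so no update work can still be
 * running, then cancels the per-port ClassPortInfo work, unregisters
 * the MAD agents and drops any remaining SM AH references.
 */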
static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);
	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

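/*
 * Module init for the SA query layer: seeds the MAD transaction ID,
 * registers the SA device client, initializes multicast handling and
 * creates the ordered workqueue used by the SA netlink (local service)
 * machinery.  Each step unwinds the previous ones on failure.
 */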
int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

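/*
 * Module teardown: stops the netlink timeout work and drains its
 * workqueue before destroying it, then tears down multicast handling,
 * unregisters the client and destroys the query IDR.
 */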
void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}