/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "core_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static atomic_t ib_mad_client_id = ATOMIC_INIT(0);

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}
/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}
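/*
 * Added note (not from the original source): the helpers above treat the
 * IBA "vendor range 2" classes 0x30-0x4f specially; vendor_class_index()
 * simply rebases them, so, for example, class 0x30 maps to slot 0 and
 * class 0x4f to the last slot of the per-version vendor table.
 */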
static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}
int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
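/*
 * Added note (not from the original source): per the test above, a MAD is a
 * "response" when the response bit (IB_MGMT_METHOD_RESP, 0x80) is set in the
 * method - e.g. GetResp (0x81) - or when it is a TrapRepress, or a BM class
 * MAD with the response bit set in attr_mod; plain Get (0x01) and Set (0x02)
 * requests are not.
 */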
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
				    __func__, qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_dbg_ratelimited(&device->dev,
				    "%s: invalid RMPP Version %u\n",
				    __func__, rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_dbg_ratelimited(&device->dev,
					    "%s: invalid Class Version %u\n",
					    __func__,
					    mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_dbg_ratelimited(&device->dev,
					    "%s: no recv_handler\n", __func__);
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid Mgmt Class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_dbg_ratelimited(&device->dev,
					    "%s: Invalid Mgmt Class 0\n",
					    __func__);
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: No OUI specified for class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_dbg_ratelimited(&device->dev,
					"%s: RMPP version for non-RMPP class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
			     IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
			     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid SM QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
			     IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
			     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid GS QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n",
				    __func__, port_num);
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0.
	 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
				    __func__, qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
	if (ret2) {
		ret = ERR_PTR(ret2);
		goto error4;
	}

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error5;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error5;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error5;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error5:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
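/*
 * Illustrative sketch (added, not part of the original file): a minimal
 * kernel client registering for Performance Management MADs on QP1.  The
 * handler names and the "my_client" context pointer are hypothetical; the
 * call itself follows the ib_register_mad_agent() signature above.
 *
 *	static void my_send_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_send_wc *wc)
 *	{
 *		ib_free_send_mad(wc->send_buf);
 *	}
 *
 *	static void my_recv_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_send_buf *send_buf,
 *				    struct ib_mad_recv_wc *wc)
 *	{
 *		... consume wc->recv_buf.mad ...
 *		ib_free_recv_mad(wc);
 *	}
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class		= IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version	= 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_client, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */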
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;
	int err;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);

	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
	if (err) {
		ret = ERR_PTR(err);
		goto error2;
	}

	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error3;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error3:
	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);

	kfree(mad_snoop_priv);
}
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
			 u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}
static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
					(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return 0;
}
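/*
 * Worked example (added): for an SA-class send with hdr_len = IB_MGMT_SA_HDR
 * (56 bytes) and mad_size = sizeof(struct ib_mad) (256 bytes), seg_size is
 * 200; a data_len of 500 gives pad = 200 - (500 % 200) = 100, so the RMPP
 * payload is padded out to an exact multiple of the segment size.
 */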
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}
int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask,
					   u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);
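/*
 * Added note (not from the original source): the offsets above are the
 * class-specific header lengths, e.g. SA MADs use the 56-byte IB_MGMT_SA_HDR
 * while classes without an extended header fall back to the 24-byte common
 * header (IB_MGMT_MAD_HDR), so the returned value is where the class payload
 * begins within the MAD.
 */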
int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {
		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		ret = ib_mad_enforce_security(mad_agent_priv,
					      mad_send_wr->send_wr.pkey_index);
		if (ret)
			goto error;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
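/*
 * Illustrative sketch (added, not part of the original file): building and
 * posting a single, non-RMPP MAD with an agent registered as in the earlier
 * example.  "agent", "ah" and "remote_qpn" are assumed to already exist and
 * error handling is abbreviated.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, 0, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *
 *	msg->ah = ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 2;
 *	... fill in msg->mad (common MAD header plus payload) ...
 *
 *	if (ib_post_send_mad(msg, NULL)) {
 *		ib_free_send_mad(msg);
 *		return -EIO;
 *	}
 *	The buffer is then released in the send handler with
 *	ib_free_send_mad() once the send completes or times out.
 */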
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}
static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	return (*method) ? 0 : (-ENOMEM);
}
/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;
	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;
	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor)
			goto error1;

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class)
			goto error2;

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			if (!*method)
				goto error3;
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			/* Allocate method table for this OUI */
			if (!*method) {
				ret = allocate_method_table(method);
				if (ret)
					goto error3;
			}
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad_hdr)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD received with response
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    ARRAY_SIZE(class->method_table))
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			dev_notice(&port_priv->device->dev,
				   "No receive handler for client %p on port %d\n",
				   &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}
static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
			bool opa)
{
	int valid = 0;
	u32 qp_num = qp_info->qp->qp_num;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
		pr_err("MAD received with unsupported base version %d %s\n",
		       mad_hdr->base_version, opa ? "(opa)" : "");
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* CM attributes other than ClassPortInfo only use Send method */
		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
			goto out;
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}
static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}
static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}
static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc)
{
	struct rdma_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;
	bool has_grh;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (rdma_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
	if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not. Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!has_grh) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			const struct ib_global_route *grh =
					rdma_ah_read_grh(&attr);

			if (ib_get_cached_gid(device, port_num,
					      grh->sgid_index, &sgid, NULL))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!has_grh)
		return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
	else
		return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
			       rwc->recv_buf.grh->sgid.raw,
			       16);
}
static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}
struct ib_mad_send_wr_private *
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->done_list);
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	ret = ib_mad_enforce_security(mad_agent_priv,
				      mad_recv_wc->wc->pkey_index);
	if (ret) {
		ib_free_recv_mad(mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
		return;
	}

	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/* user rmpp is in effect
				 * and this is an active RMPP MAD
				 */
				mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent, NULL,
						mad_recv_wc);
				atomic_dec(&mad_agent_priv->refcount);
			} else {
				/* not user rmpp, revert to normal behavior and
				 * drop the mad
				 */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
				return;
			}
		} else {
			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_agent_priv->agent.recv_handler(
					&mad_agent_priv->agent,
					&mad_send_wr->send_buf,
					mad_recv_wc);
			atomic_dec(&mad_agent_priv->refcount);

			mad_send_wc.status = IB_WC_SUCCESS;
			mad_send_wc.vendor_err = 0;
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
		}
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}
static enum smi_action
handle_ib_smi(const struct ib_mad_port_private *port_priv,
	      const struct ib_mad_qp_info *qp_info,
	      const struct ib_wc *wc,
	      int port_num,
	      struct ib_mad_private *recv,
	      struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	if (smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    response->mad_size,
				    false);

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}
static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
			if (recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			    recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				*resp_len = opa_get_smp_header_size(
							(struct opa_smp *)recv->mad);
			else
				*resp_len = sizeof(struct ib_mad_hdr);
		}

		return true;
	} else {
		return false;
	}
}
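
/*
 * OPA variant of the directed-route SMP path above; the flow mirrors
 * handle_ib_smi() but uses the opa_smp layout and the OPA forwarding
 * helpers.
 */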
static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}
static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SM_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}
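
/*
 * Receive completion handler, run from the CQ completion context.  The
 * flow is: unmap and validate the MAD, run SMP handling for directed-route
 * classes, give the driver's process_mad() first refusal, then hand the
 * MAD to the matching agent or generate an unmatched response.  A
 * replacement receive buffer is always reposted at the end.
 */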
static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;
	}

	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response)
		goto out;

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
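
/*
 * Called with the agent lock held: cancel the timeout work when the wait
 * list is empty, otherwise re-arm it when the request at the head of the
 * list expires sooner than the currently programmed timeout.
 */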
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  u32 timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
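
/*
 * Send completion handler: unmap the finished work request, complete it
 * towards the client, and, if the hardware send queue had overflowed, move
 * the next queued request onto the QP.  A failed repost is turned into a
 * local QP operation error and completed through the same path.
 */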
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		if (!ib_mad_send_error(port_priv, wc))
			return;
	}

	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					   &bad_send_wr);
			if (!ret)
				return false;
		}
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"%s - ib_modify_qp to RTS: %d\n",
					__func__, ret);
			else
				mark_sends_for_retry(qp_info);
		}
	}

	return true;
}
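
/*
 * Flush every outstanding request for an agent that is being torn down.
 * Sends still on the send list are only marked flushed (their completions
 * will still arrive); requests parked on the wait list are completed here
 * with IB_WC_WR_FLUSH_ERR.
 */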
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
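
/*
 * Illustrative use (not part of this file): a client that registered an
 * agent and posted a request can shorten its timeout with
 * ib_modify_mad(agent, send_buf, 100) or abort it outright with
 * ib_cancel_mad(agent, send_buf).  Cancelled sends are reported through
 * the agent's send_handler with status IB_WC_WR_FLUSH_ERR.
 */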
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;

			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     local->mad_send_wr->send_wr.wr.wr_cqe,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_send_wr->send_buf,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
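
/*
 * Receive buffers are accounted against recv_queue->max_active; the loop
 * below keeps posting until the queue is full, reusing the caller's MAD
 * buffer first when one is handed back for reposting.
 */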
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		mad_priv->header.mad_list.mad_queue = recv_queue;
		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}
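
/*
 * Bringing up the special QPs follows the usual RESET -> INIT -> RTR -> RTS
 * sequence via ib_modify_qp(); for UD QP0/QP1 this needs only a P_Key index
 * and, for QP1, the well-known IB_QP1_QKEY.
 */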
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}
static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}
/*
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv)
		return -ENOMEM;

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	port_priv->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error3;
	}

	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
			IB_POLL_UNBOUND_WORKQUEUE);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error4;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error4:
	ib_dealloc_pd(port_priv->pd);
error3:
	kfree(port_priv);

	return ret;
}
/*
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_free_cq(port_priv->cq);
	ib_dealloc_pd(port_priv->pd);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
static void ib_mad_init_device(struct ib_device *device)
{
	int start, i;

	start = rdma_start_port(device);

	for (i = start; i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}
static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
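
/*
 * Module init clamps the send_queue_size/recv_queue_size parameters to the
 * supported range before registering as an ib_client, so a bogus value on
 * the module command line cannot produce an unusable MAD QP.
 */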
int ib_mad_init(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}
void ib_mad_cleanup(void)
{
	ib_unregister_client(&mad_client);
}