2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
6 * Copyright (c) 2014 Intel Corporation. All rights reserved.
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
38 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40 #include <linux/dma-mapping.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/security.h>
44 #include <rdma/ib_cache.h>
47 #include "core_priv.h"
52 #include "core_priv.h"
54 static int mad_sendq_size
= IB_MAD_QP_SEND_SIZE
;
55 static int mad_recvq_size
= IB_MAD_QP_RECV_SIZE
;
57 module_param_named(send_queue_size
, mad_sendq_size
, int, 0444);
58 MODULE_PARM_DESC(send_queue_size
, "Size of send queue in number of work requests");
59 module_param_named(recv_queue_size
, mad_recvq_size
, int, 0444);
60 MODULE_PARM_DESC(recv_queue_size
, "Size of receive queue in number of work requests");
62 static struct list_head ib_mad_port_list
;
63 static atomic_t ib_mad_client_id
= ATOMIC_INIT(0);
66 static DEFINE_SPINLOCK(ib_mad_port_list_lock
);
68 /* Forward declarations */
69 static int method_in_use(struct ib_mad_mgmt_method_table
**method
,
70 struct ib_mad_reg_req
*mad_reg_req
);
71 static void remove_mad_reg_req(struct ib_mad_agent_private
*priv
);
72 static struct ib_mad_agent_private
*find_mad_agent(
73 struct ib_mad_port_private
*port_priv
,
74 const struct ib_mad_hdr
*mad
);
75 static int ib_mad_post_receive_mads(struct ib_mad_qp_info
*qp_info
,
76 struct ib_mad_private
*mad
);
77 static void cancel_mads(struct ib_mad_agent_private
*mad_agent_priv
);
78 static void timeout_sends(struct work_struct
*work
);
79 static void local_completions(struct work_struct
*work
);
80 static int add_nonoui_reg_req(struct ib_mad_reg_req
*mad_reg_req
,
81 struct ib_mad_agent_private
*agent_priv
,
83 static int add_oui_reg_req(struct ib_mad_reg_req
*mad_reg_req
,
84 struct ib_mad_agent_private
*agent_priv
);
85 static bool ib_mad_send_error(struct ib_mad_port_private
*port_priv
,
87 static void ib_mad_send_done(struct ib_cq
*cq
, struct ib_wc
*wc
);
90 * Returns a ib_mad_port_private structure or NULL for a device/port
91 * Assumes ib_mad_port_list_lock is being held
93 static inline struct ib_mad_port_private
*
94 __ib_get_mad_port(struct ib_device
*device
, int port_num
)
96 struct ib_mad_port_private
*entry
;
98 list_for_each_entry(entry
, &ib_mad_port_list
, port_list
) {
99 if (entry
->device
== device
&& entry
->port_num
== port_num
)
106 * Wrapper function to return a ib_mad_port_private structure or NULL
109 static inline struct ib_mad_port_private
*
110 ib_get_mad_port(struct ib_device
*device
, int port_num
)
112 struct ib_mad_port_private
*entry
;
115 spin_lock_irqsave(&ib_mad_port_list_lock
, flags
);
116 entry
= __ib_get_mad_port(device
, port_num
);
117 spin_unlock_irqrestore(&ib_mad_port_list_lock
, flags
);
122 static inline u8
convert_mgmt_class(u8 mgmt_class
)
124 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
125 return mgmt_class
== IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
?
129 static int get_spl_qp_index(enum ib_qp_type qp_type
)
142 static int vendor_class_index(u8 mgmt_class
)
144 return mgmt_class
- IB_MGMT_CLASS_VENDOR_RANGE2_START
;
147 static int is_vendor_class(u8 mgmt_class
)
149 if ((mgmt_class
< IB_MGMT_CLASS_VENDOR_RANGE2_START
) ||
150 (mgmt_class
> IB_MGMT_CLASS_VENDOR_RANGE2_END
))
155 static int is_vendor_oui(char *oui
)
157 if (oui
[0] || oui
[1] || oui
[2])
162 static int is_vendor_method_in_use(
163 struct ib_mad_mgmt_vendor_class
*vendor_class
,
164 struct ib_mad_reg_req
*mad_reg_req
)
166 struct ib_mad_mgmt_method_table
*method
;
169 for (i
= 0; i
< MAX_MGMT_OUI
; i
++) {
170 if (!memcmp(vendor_class
->oui
[i
], mad_reg_req
->oui
, 3)) {
171 method
= vendor_class
->method_table
[i
];
173 if (method_in_use(&method
, mad_reg_req
))
183 int ib_response_mad(const struct ib_mad_hdr
*hdr
)
185 return ((hdr
->method
& IB_MGMT_METHOD_RESP
) ||
186 (hdr
->method
== IB_MGMT_METHOD_TRAP_REPRESS
) ||
187 ((hdr
->mgmt_class
== IB_MGMT_CLASS_BM
) &&
188 (hdr
->attr_mod
& IB_BM_ATTR_MOD_RESP
)));
190 EXPORT_SYMBOL(ib_response_mad
);
193 * ib_register_mad_agent - Register to send/receive MADs
195 struct ib_mad_agent
*ib_register_mad_agent(struct ib_device
*device
,
197 enum ib_qp_type qp_type
,
198 struct ib_mad_reg_req
*mad_reg_req
,
200 ib_mad_send_handler send_handler
,
201 ib_mad_recv_handler recv_handler
,
203 u32 registration_flags
)
205 struct ib_mad_port_private
*port_priv
;
206 struct ib_mad_agent
*ret
= ERR_PTR(-EINVAL
);
207 struct ib_mad_agent_private
*mad_agent_priv
;
208 struct ib_mad_reg_req
*reg_req
= NULL
;
209 struct ib_mad_mgmt_class_table
*class;
210 struct ib_mad_mgmt_vendor_class_table
*vendor
;
211 struct ib_mad_mgmt_vendor_class
*vendor_class
;
212 struct ib_mad_mgmt_method_table
*method
;
215 u8 mgmt_class
, vclass
;
217 /* Validate parameters */
218 qpn
= get_spl_qp_index(qp_type
);
220 dev_notice(&device
->dev
,
221 "ib_register_mad_agent: invalid QP Type %d\n",
226 if (rmpp_version
&& rmpp_version
!= IB_MGMT_RMPP_VERSION
) {
227 dev_notice(&device
->dev
,
228 "ib_register_mad_agent: invalid RMPP Version %u\n",
233 /* Validate MAD registration request if supplied */
235 if (mad_reg_req
->mgmt_class_version
>= MAX_MGMT_VERSION
) {
236 dev_notice(&device
->dev
,
237 "ib_register_mad_agent: invalid Class Version %u\n",
238 mad_reg_req
->mgmt_class_version
);
242 dev_notice(&device
->dev
,
243 "ib_register_mad_agent: no recv_handler\n");
246 if (mad_reg_req
->mgmt_class
>= MAX_MGMT_CLASS
) {
248 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
249 * one in this range currently allowed
251 if (mad_reg_req
->mgmt_class
!=
252 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
) {
253 dev_notice(&device
->dev
,
254 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
255 mad_reg_req
->mgmt_class
);
258 } else if (mad_reg_req
->mgmt_class
== 0) {
260 * Class 0 is reserved in IBA and is used for
261 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
263 dev_notice(&device
->dev
,
264 "ib_register_mad_agent: Invalid Mgmt Class 0\n");
266 } else if (is_vendor_class(mad_reg_req
->mgmt_class
)) {
268 * If class is in "new" vendor range,
269 * ensure supplied OUI is not zero
271 if (!is_vendor_oui(mad_reg_req
->oui
)) {
272 dev_notice(&device
->dev
,
273 "ib_register_mad_agent: No OUI specified for class 0x%x\n",
274 mad_reg_req
->mgmt_class
);
278 /* Make sure class supplied is consistent with RMPP */
279 if (!ib_is_mad_class_rmpp(mad_reg_req
->mgmt_class
)) {
281 dev_notice(&device
->dev
,
282 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
283 mad_reg_req
->mgmt_class
);
288 /* Make sure class supplied is consistent with QP type */
289 if (qp_type
== IB_QPT_SMI
) {
290 if ((mad_reg_req
->mgmt_class
!=
291 IB_MGMT_CLASS_SUBN_LID_ROUTED
) &&
292 (mad_reg_req
->mgmt_class
!=
293 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
)) {
294 dev_notice(&device
->dev
,
295 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
296 mad_reg_req
->mgmt_class
);
300 if ((mad_reg_req
->mgmt_class
==
301 IB_MGMT_CLASS_SUBN_LID_ROUTED
) ||
302 (mad_reg_req
->mgmt_class
==
303 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
)) {
304 dev_notice(&device
->dev
,
305 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
306 mad_reg_req
->mgmt_class
);
311 /* No registration request supplied */
314 if (registration_flags
& IB_MAD_USER_RMPP
)
318 /* Validate device and port */
319 port_priv
= ib_get_mad_port(device
, port_num
);
321 dev_notice(&device
->dev
,
322 "ib_register_mad_agent: Invalid port %d\n",
324 ret
= ERR_PTR(-ENODEV
);
328 /* Verify the QP requested is supported. For example, Ethernet devices
329 * will not have QP0 */
330 if (!port_priv
->qp_info
[qpn
].qp
) {
331 dev_notice(&device
->dev
,
332 "ib_register_mad_agent: QP %d not supported\n", qpn
);
333 ret
= ERR_PTR(-EPROTONOSUPPORT
);
337 /* Allocate structures */
338 mad_agent_priv
= kzalloc(sizeof *mad_agent_priv
, GFP_KERNEL
);
339 if (!mad_agent_priv
) {
340 ret
= ERR_PTR(-ENOMEM
);
345 reg_req
= kmemdup(mad_reg_req
, sizeof *reg_req
, GFP_KERNEL
);
347 ret
= ERR_PTR(-ENOMEM
);
352 /* Now, fill in the various structures */
353 mad_agent_priv
->qp_info
= &port_priv
->qp_info
[qpn
];
354 mad_agent_priv
->reg_req
= reg_req
;
355 mad_agent_priv
->agent
.rmpp_version
= rmpp_version
;
356 mad_agent_priv
->agent
.device
= device
;
357 mad_agent_priv
->agent
.recv_handler
= recv_handler
;
358 mad_agent_priv
->agent
.send_handler
= send_handler
;
359 mad_agent_priv
->agent
.context
= context
;
360 mad_agent_priv
->agent
.qp
= port_priv
->qp_info
[qpn
].qp
;
361 mad_agent_priv
->agent
.port_num
= port_num
;
362 mad_agent_priv
->agent
.flags
= registration_flags
;
363 spin_lock_init(&mad_agent_priv
->lock
);
364 INIT_LIST_HEAD(&mad_agent_priv
->send_list
);
365 INIT_LIST_HEAD(&mad_agent_priv
->wait_list
);
366 INIT_LIST_HEAD(&mad_agent_priv
->done_list
);
367 INIT_LIST_HEAD(&mad_agent_priv
->rmpp_list
);
368 INIT_DELAYED_WORK(&mad_agent_priv
->timed_work
, timeout_sends
);
369 INIT_LIST_HEAD(&mad_agent_priv
->local_list
);
370 INIT_WORK(&mad_agent_priv
->local_work
, local_completions
);
371 atomic_set(&mad_agent_priv
->refcount
, 1);
372 init_completion(&mad_agent_priv
->comp
);
374 ret2
= ib_mad_agent_security_setup(&mad_agent_priv
->agent
, qp_type
);
380 spin_lock_irqsave(&port_priv
->reg_lock
, flags
);
381 mad_agent_priv
->agent
.hi_tid
= atomic_inc_return(&ib_mad_client_id
);
384 * Make sure MAD registration (if supplied)
385 * is non overlapping with any existing ones
388 mgmt_class
= convert_mgmt_class(mad_reg_req
->mgmt_class
);
389 if (!is_vendor_class(mgmt_class
)) {
390 class = port_priv
->version
[mad_reg_req
->
391 mgmt_class_version
].class;
393 method
= class->method_table
[mgmt_class
];
395 if (method_in_use(&method
,
400 ret2
= add_nonoui_reg_req(mad_reg_req
, mad_agent_priv
,
403 /* "New" vendor class range */
404 vendor
= port_priv
->version
[mad_reg_req
->
405 mgmt_class_version
].vendor
;
407 vclass
= vendor_class_index(mgmt_class
);
408 vendor_class
= vendor
->vendor_class
[vclass
];
410 if (is_vendor_method_in_use(
416 ret2
= add_oui_reg_req(mad_reg_req
, mad_agent_priv
);
424 /* Add mad agent into port's agent list */
425 list_add_tail(&mad_agent_priv
->agent_list
, &port_priv
->agent_list
);
426 spin_unlock_irqrestore(&port_priv
->reg_lock
, flags
);
428 return &mad_agent_priv
->agent
;
430 spin_unlock_irqrestore(&port_priv
->reg_lock
, flags
);
431 ib_mad_agent_security_cleanup(&mad_agent_priv
->agent
);
435 kfree(mad_agent_priv
);
439 EXPORT_SYMBOL(ib_register_mad_agent
);
441 static inline int is_snooping_sends(int mad_snoop_flags
)
443 return (mad_snoop_flags
&
444 (/*IB_MAD_SNOOP_POSTED_SENDS |
445 IB_MAD_SNOOP_RMPP_SENDS |*/
446 IB_MAD_SNOOP_SEND_COMPLETIONS
/*|
447 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
450 static inline int is_snooping_recvs(int mad_snoop_flags
)
452 return (mad_snoop_flags
&
453 (IB_MAD_SNOOP_RECVS
/*|
454 IB_MAD_SNOOP_RMPP_RECVS*/));
457 static int register_snoop_agent(struct ib_mad_qp_info
*qp_info
,
458 struct ib_mad_snoop_private
*mad_snoop_priv
)
460 struct ib_mad_snoop_private
**new_snoop_table
;
464 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
465 /* Check for empty slot in array. */
466 for (i
= 0; i
< qp_info
->snoop_table_size
; i
++)
467 if (!qp_info
->snoop_table
[i
])
470 if (i
== qp_info
->snoop_table_size
) {
472 new_snoop_table
= krealloc(qp_info
->snoop_table
,
473 sizeof mad_snoop_priv
*
474 (qp_info
->snoop_table_size
+ 1),
476 if (!new_snoop_table
) {
481 qp_info
->snoop_table
= new_snoop_table
;
482 qp_info
->snoop_table_size
++;
484 qp_info
->snoop_table
[i
] = mad_snoop_priv
;
485 atomic_inc(&qp_info
->snoop_count
);
487 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
491 struct ib_mad_agent
*ib_register_mad_snoop(struct ib_device
*device
,
493 enum ib_qp_type qp_type
,
495 ib_mad_snoop_handler snoop_handler
,
496 ib_mad_recv_handler recv_handler
,
499 struct ib_mad_port_private
*port_priv
;
500 struct ib_mad_agent
*ret
;
501 struct ib_mad_snoop_private
*mad_snoop_priv
;
505 /* Validate parameters */
506 if ((is_snooping_sends(mad_snoop_flags
) && !snoop_handler
) ||
507 (is_snooping_recvs(mad_snoop_flags
) && !recv_handler
)) {
508 ret
= ERR_PTR(-EINVAL
);
511 qpn
= get_spl_qp_index(qp_type
);
513 ret
= ERR_PTR(-EINVAL
);
516 port_priv
= ib_get_mad_port(device
, port_num
);
518 ret
= ERR_PTR(-ENODEV
);
521 /* Allocate structures */
522 mad_snoop_priv
= kzalloc(sizeof *mad_snoop_priv
, GFP_KERNEL
);
523 if (!mad_snoop_priv
) {
524 ret
= ERR_PTR(-ENOMEM
);
528 /* Now, fill in the various structures */
529 mad_snoop_priv
->qp_info
= &port_priv
->qp_info
[qpn
];
530 mad_snoop_priv
->agent
.device
= device
;
531 mad_snoop_priv
->agent
.recv_handler
= recv_handler
;
532 mad_snoop_priv
->agent
.snoop_handler
= snoop_handler
;
533 mad_snoop_priv
->agent
.context
= context
;
534 mad_snoop_priv
->agent
.qp
= port_priv
->qp_info
[qpn
].qp
;
535 mad_snoop_priv
->agent
.port_num
= port_num
;
536 mad_snoop_priv
->mad_snoop_flags
= mad_snoop_flags
;
537 init_completion(&mad_snoop_priv
->comp
);
539 err
= ib_mad_agent_security_setup(&mad_snoop_priv
->agent
, qp_type
);
545 mad_snoop_priv
->snoop_index
= register_snoop_agent(
546 &port_priv
->qp_info
[qpn
],
548 if (mad_snoop_priv
->snoop_index
< 0) {
549 ret
= ERR_PTR(mad_snoop_priv
->snoop_index
);
553 atomic_set(&mad_snoop_priv
->refcount
, 1);
554 return &mad_snoop_priv
->agent
;
556 ib_mad_agent_security_cleanup(&mad_snoop_priv
->agent
);
558 kfree(mad_snoop_priv
);
562 EXPORT_SYMBOL(ib_register_mad_snoop
);
564 static inline void deref_mad_agent(struct ib_mad_agent_private
*mad_agent_priv
)
566 if (atomic_dec_and_test(&mad_agent_priv
->refcount
))
567 complete(&mad_agent_priv
->comp
);
570 static inline void deref_snoop_agent(struct ib_mad_snoop_private
*mad_snoop_priv
)
572 if (atomic_dec_and_test(&mad_snoop_priv
->refcount
))
573 complete(&mad_snoop_priv
->comp
);
576 static void unregister_mad_agent(struct ib_mad_agent_private
*mad_agent_priv
)
578 struct ib_mad_port_private
*port_priv
;
581 /* Note that we could still be handling received MADs */
584 * Canceling all sends results in dropping received response
585 * MADs, preventing us from queuing additional work
587 cancel_mads(mad_agent_priv
);
588 port_priv
= mad_agent_priv
->qp_info
->port_priv
;
589 cancel_delayed_work(&mad_agent_priv
->timed_work
);
591 spin_lock_irqsave(&port_priv
->reg_lock
, flags
);
592 remove_mad_reg_req(mad_agent_priv
);
593 list_del(&mad_agent_priv
->agent_list
);
594 spin_unlock_irqrestore(&port_priv
->reg_lock
, flags
);
596 flush_workqueue(port_priv
->wq
);
597 ib_cancel_rmpp_recvs(mad_agent_priv
);
599 deref_mad_agent(mad_agent_priv
);
600 wait_for_completion(&mad_agent_priv
->comp
);
602 ib_mad_agent_security_cleanup(&mad_agent_priv
->agent
);
604 kfree(mad_agent_priv
->reg_req
);
605 kfree(mad_agent_priv
);
608 static void unregister_mad_snoop(struct ib_mad_snoop_private
*mad_snoop_priv
)
610 struct ib_mad_qp_info
*qp_info
;
613 qp_info
= mad_snoop_priv
->qp_info
;
614 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
615 qp_info
->snoop_table
[mad_snoop_priv
->snoop_index
] = NULL
;
616 atomic_dec(&qp_info
->snoop_count
);
617 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
619 deref_snoop_agent(mad_snoop_priv
);
620 wait_for_completion(&mad_snoop_priv
->comp
);
622 ib_mad_agent_security_cleanup(&mad_snoop_priv
->agent
);
624 kfree(mad_snoop_priv
);
628 * ib_unregister_mad_agent - Unregisters a client from using MAD services
630 void ib_unregister_mad_agent(struct ib_mad_agent
*mad_agent
)
632 struct ib_mad_agent_private
*mad_agent_priv
;
633 struct ib_mad_snoop_private
*mad_snoop_priv
;
635 /* If the TID is zero, the agent can only snoop. */
636 if (mad_agent
->hi_tid
) {
637 mad_agent_priv
= container_of(mad_agent
,
638 struct ib_mad_agent_private
,
640 unregister_mad_agent(mad_agent_priv
);
642 mad_snoop_priv
= container_of(mad_agent
,
643 struct ib_mad_snoop_private
,
645 unregister_mad_snoop(mad_snoop_priv
);
648 EXPORT_SYMBOL(ib_unregister_mad_agent
);
650 static void dequeue_mad(struct ib_mad_list_head
*mad_list
)
652 struct ib_mad_queue
*mad_queue
;
655 BUG_ON(!mad_list
->mad_queue
);
656 mad_queue
= mad_list
->mad_queue
;
657 spin_lock_irqsave(&mad_queue
->lock
, flags
);
658 list_del(&mad_list
->list
);
660 spin_unlock_irqrestore(&mad_queue
->lock
, flags
);
663 static void snoop_send(struct ib_mad_qp_info
*qp_info
,
664 struct ib_mad_send_buf
*send_buf
,
665 struct ib_mad_send_wc
*mad_send_wc
,
668 struct ib_mad_snoop_private
*mad_snoop_priv
;
672 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
673 for (i
= 0; i
< qp_info
->snoop_table_size
; i
++) {
674 mad_snoop_priv
= qp_info
->snoop_table
[i
];
675 if (!mad_snoop_priv
||
676 !(mad_snoop_priv
->mad_snoop_flags
& mad_snoop_flags
))
679 atomic_inc(&mad_snoop_priv
->refcount
);
680 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
681 mad_snoop_priv
->agent
.snoop_handler(&mad_snoop_priv
->agent
,
682 send_buf
, mad_send_wc
);
683 deref_snoop_agent(mad_snoop_priv
);
684 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
686 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
689 static void snoop_recv(struct ib_mad_qp_info
*qp_info
,
690 struct ib_mad_recv_wc
*mad_recv_wc
,
693 struct ib_mad_snoop_private
*mad_snoop_priv
;
697 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
698 for (i
= 0; i
< qp_info
->snoop_table_size
; i
++) {
699 mad_snoop_priv
= qp_info
->snoop_table
[i
];
700 if (!mad_snoop_priv
||
701 !(mad_snoop_priv
->mad_snoop_flags
& mad_snoop_flags
))
704 atomic_inc(&mad_snoop_priv
->refcount
);
705 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
706 mad_snoop_priv
->agent
.recv_handler(&mad_snoop_priv
->agent
, NULL
,
708 deref_snoop_agent(mad_snoop_priv
);
709 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
711 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
714 static void build_smp_wc(struct ib_qp
*qp
, struct ib_cqe
*cqe
, u16 slid
,
715 u16 pkey_index
, u8 port_num
, struct ib_wc
*wc
)
717 memset(wc
, 0, sizeof *wc
);
719 wc
->status
= IB_WC_SUCCESS
;
720 wc
->opcode
= IB_WC_RECV
;
721 wc
->pkey_index
= pkey_index
;
722 wc
->byte_len
= sizeof(struct ib_mad
) + sizeof(struct ib_grh
);
727 wc
->dlid_path_bits
= 0;
728 wc
->port_num
= port_num
;
731 static size_t mad_priv_size(const struct ib_mad_private
*mp
)
733 return sizeof(struct ib_mad_private
) + mp
->mad_size
;
736 static struct ib_mad_private
*alloc_mad_private(size_t mad_size
, gfp_t flags
)
738 size_t size
= sizeof(struct ib_mad_private
) + mad_size
;
739 struct ib_mad_private
*ret
= kzalloc(size
, flags
);
742 ret
->mad_size
= mad_size
;
747 static size_t port_mad_size(const struct ib_mad_port_private
*port_priv
)
749 return rdma_max_mad_size(port_priv
->device
, port_priv
->port_num
);
752 static size_t mad_priv_dma_size(const struct ib_mad_private
*mp
)
754 return sizeof(struct ib_grh
) + mp
->mad_size
;
758 * Return 0 if SMP is to be sent
759 * Return 1 if SMP was consumed locally (whether or not solicited)
760 * Return < 0 if error
762 static int handle_outgoing_dr_smp(struct ib_mad_agent_private
*mad_agent_priv
,
763 struct ib_mad_send_wr_private
*mad_send_wr
)
766 struct ib_smp
*smp
= mad_send_wr
->send_buf
.mad
;
767 struct opa_smp
*opa_smp
= (struct opa_smp
*)smp
;
769 struct ib_mad_local_private
*local
;
770 struct ib_mad_private
*mad_priv
;
771 struct ib_mad_port_private
*port_priv
;
772 struct ib_mad_agent_private
*recv_mad_agent
= NULL
;
773 struct ib_device
*device
= mad_agent_priv
->agent
.device
;
776 struct ib_ud_wr
*send_wr
= &mad_send_wr
->send_wr
;
777 size_t mad_size
= port_mad_size(mad_agent_priv
->qp_info
->port_priv
);
778 u16 out_mad_pkey_index
= 0;
780 bool opa
= rdma_cap_opa_mad(mad_agent_priv
->qp_info
->port_priv
->device
,
781 mad_agent_priv
->qp_info
->port_priv
->port_num
);
783 if (rdma_cap_ib_switch(device
) &&
784 smp
->mgmt_class
== IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
)
785 port_num
= send_wr
->port_num
;
787 port_num
= mad_agent_priv
->agent
.port_num
;
790 * Directed route handling starts if the initial LID routed part of
791 * a request or the ending LID routed part of a response is empty.
792 * If we are at the start of the LID routed part, don't update the
793 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
795 if (opa
&& smp
->class_version
== OPA_SM_CLASS_VERSION
) {
798 if ((opa_get_smp_direction(opa_smp
)
799 ? opa_smp
->route
.dr
.dr_dlid
: opa_smp
->route
.dr
.dr_slid
) ==
800 OPA_LID_PERMISSIVE
&&
801 opa_smi_handle_dr_smp_send(opa_smp
,
802 rdma_cap_ib_switch(device
),
803 port_num
) == IB_SMI_DISCARD
) {
805 dev_err(&device
->dev
, "OPA Invalid directed route\n");
808 opa_drslid
= be32_to_cpu(opa_smp
->route
.dr
.dr_slid
);
809 if (opa_drslid
!= be32_to_cpu(OPA_LID_PERMISSIVE
) &&
810 opa_drslid
& 0xffff0000) {
812 dev_err(&device
->dev
, "OPA Invalid dr_slid 0x%x\n",
816 drslid
= (u16
)(opa_drslid
& 0x0000ffff);
818 /* Check to post send on QP or process locally */
819 if (opa_smi_check_local_smp(opa_smp
, device
) == IB_SMI_DISCARD
&&
820 opa_smi_check_local_returning_smp(opa_smp
, device
) == IB_SMI_DISCARD
)
823 if ((ib_get_smp_direction(smp
) ? smp
->dr_dlid
: smp
->dr_slid
) ==
825 smi_handle_dr_smp_send(smp
, rdma_cap_ib_switch(device
), port_num
) ==
828 dev_err(&device
->dev
, "Invalid directed route\n");
831 drslid
= be16_to_cpu(smp
->dr_slid
);
833 /* Check to post send on QP or process locally */
834 if (smi_check_local_smp(smp
, device
) == IB_SMI_DISCARD
&&
835 smi_check_local_returning_smp(smp
, device
) == IB_SMI_DISCARD
)
839 local
= kmalloc(sizeof *local
, GFP_ATOMIC
);
844 local
->mad_priv
= NULL
;
845 local
->recv_mad_agent
= NULL
;
846 mad_priv
= alloc_mad_private(mad_size
, GFP_ATOMIC
);
853 build_smp_wc(mad_agent_priv
->agent
.qp
,
854 send_wr
->wr
.wr_cqe
, drslid
,
856 send_wr
->port_num
, &mad_wc
);
858 if (opa
&& smp
->base_version
== OPA_MGMT_BASE_VERSION
) {
859 mad_wc
.byte_len
= mad_send_wr
->send_buf
.hdr_len
860 + mad_send_wr
->send_buf
.data_len
861 + sizeof(struct ib_grh
);
864 /* No GRH for DR SMP */
865 ret
= device
->process_mad(device
, 0, port_num
, &mad_wc
, NULL
,
866 (const struct ib_mad_hdr
*)smp
, mad_size
,
867 (struct ib_mad_hdr
*)mad_priv
->mad
,
868 &mad_size
, &out_mad_pkey_index
);
871 case IB_MAD_RESULT_SUCCESS
| IB_MAD_RESULT_REPLY
:
872 if (ib_response_mad((const struct ib_mad_hdr
*)mad_priv
->mad
) &&
873 mad_agent_priv
->agent
.recv_handler
) {
874 local
->mad_priv
= mad_priv
;
875 local
->recv_mad_agent
= mad_agent_priv
;
877 * Reference MAD agent until receive
878 * side of local completion handled
880 atomic_inc(&mad_agent_priv
->refcount
);
884 case IB_MAD_RESULT_SUCCESS
| IB_MAD_RESULT_CONSUMED
:
887 case IB_MAD_RESULT_SUCCESS
:
888 /* Treat like an incoming receive MAD */
889 port_priv
= ib_get_mad_port(mad_agent_priv
->agent
.device
,
890 mad_agent_priv
->agent
.port_num
);
892 memcpy(mad_priv
->mad
, smp
, mad_priv
->mad_size
);
893 recv_mad_agent
= find_mad_agent(port_priv
,
894 (const struct ib_mad_hdr
*)mad_priv
->mad
);
896 if (!port_priv
|| !recv_mad_agent
) {
898 * No receiving agent so drop packet and
899 * generate send completion.
904 local
->mad_priv
= mad_priv
;
905 local
->recv_mad_agent
= recv_mad_agent
;
914 local
->mad_send_wr
= mad_send_wr
;
916 local
->mad_send_wr
->send_wr
.pkey_index
= out_mad_pkey_index
;
917 local
->return_wc_byte_len
= mad_size
;
919 /* Reference MAD agent until send side of local completion handled */
920 atomic_inc(&mad_agent_priv
->refcount
);
921 /* Queue local completion to local list */
922 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
923 list_add_tail(&local
->completion_list
, &mad_agent_priv
->local_list
);
924 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
925 queue_work(mad_agent_priv
->qp_info
->port_priv
->wq
,
926 &mad_agent_priv
->local_work
);
932 static int get_pad_size(int hdr_len
, int data_len
, size_t mad_size
)
936 seg_size
= mad_size
- hdr_len
;
937 if (data_len
&& seg_size
) {
938 pad
= seg_size
- data_len
% seg_size
;
939 return pad
== seg_size
? 0 : pad
;
944 static void free_send_rmpp_list(struct ib_mad_send_wr_private
*mad_send_wr
)
946 struct ib_rmpp_segment
*s
, *t
;
948 list_for_each_entry_safe(s
, t
, &mad_send_wr
->rmpp_list
, list
) {
954 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private
*send_wr
,
955 size_t mad_size
, gfp_t gfp_mask
)
957 struct ib_mad_send_buf
*send_buf
= &send_wr
->send_buf
;
958 struct ib_rmpp_mad
*rmpp_mad
= send_buf
->mad
;
959 struct ib_rmpp_segment
*seg
= NULL
;
960 int left
, seg_size
, pad
;
962 send_buf
->seg_size
= mad_size
- send_buf
->hdr_len
;
963 send_buf
->seg_rmpp_size
= mad_size
- IB_MGMT_RMPP_HDR
;
964 seg_size
= send_buf
->seg_size
;
967 /* Allocate data segments. */
968 for (left
= send_buf
->data_len
+ pad
; left
> 0; left
-= seg_size
) {
969 seg
= kmalloc(sizeof (*seg
) + seg_size
, gfp_mask
);
971 free_send_rmpp_list(send_wr
);
974 seg
->num
= ++send_buf
->seg_count
;
975 list_add_tail(&seg
->list
, &send_wr
->rmpp_list
);
978 /* Zero any padding */
980 memset(seg
->data
+ seg_size
- pad
, 0, pad
);
982 rmpp_mad
->rmpp_hdr
.rmpp_version
= send_wr
->mad_agent_priv
->
984 rmpp_mad
->rmpp_hdr
.rmpp_type
= IB_MGMT_RMPP_TYPE_DATA
;
985 ib_set_rmpp_flags(&rmpp_mad
->rmpp_hdr
, IB_MGMT_RMPP_FLAG_ACTIVE
);
987 send_wr
->cur_seg
= container_of(send_wr
->rmpp_list
.next
,
988 struct ib_rmpp_segment
, list
);
989 send_wr
->last_ack_seg
= send_wr
->cur_seg
;
993 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent
*agent
)
995 return agent
->rmpp_version
&& !(agent
->flags
& IB_MAD_USER_RMPP
);
997 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent
);
999 struct ib_mad_send_buf
* ib_create_send_mad(struct ib_mad_agent
*mad_agent
,
1000 u32 remote_qpn
, u16 pkey_index
,
1002 int hdr_len
, int data_len
,
1006 struct ib_mad_agent_private
*mad_agent_priv
;
1007 struct ib_mad_send_wr_private
*mad_send_wr
;
1008 int pad
, message_size
, ret
, size
;
1013 mad_agent_priv
= container_of(mad_agent
, struct ib_mad_agent_private
,
1016 opa
= rdma_cap_opa_mad(mad_agent
->device
, mad_agent
->port_num
);
1018 if (opa
&& base_version
== OPA_MGMT_BASE_VERSION
)
1019 mad_size
= sizeof(struct opa_mad
);
1021 mad_size
= sizeof(struct ib_mad
);
1023 pad
= get_pad_size(hdr_len
, data_len
, mad_size
);
1024 message_size
= hdr_len
+ data_len
+ pad
;
1026 if (ib_mad_kernel_rmpp_agent(mad_agent
)) {
1027 if (!rmpp_active
&& message_size
> mad_size
)
1028 return ERR_PTR(-EINVAL
);
1030 if (rmpp_active
|| message_size
> mad_size
)
1031 return ERR_PTR(-EINVAL
);
1033 size
= rmpp_active
? hdr_len
: mad_size
;
1034 buf
= kzalloc(sizeof *mad_send_wr
+ size
, gfp_mask
);
1036 return ERR_PTR(-ENOMEM
);
1038 mad_send_wr
= buf
+ size
;
1039 INIT_LIST_HEAD(&mad_send_wr
->rmpp_list
);
1040 mad_send_wr
->send_buf
.mad
= buf
;
1041 mad_send_wr
->send_buf
.hdr_len
= hdr_len
;
1042 mad_send_wr
->send_buf
.data_len
= data_len
;
1043 mad_send_wr
->pad
= pad
;
1045 mad_send_wr
->mad_agent_priv
= mad_agent_priv
;
1046 mad_send_wr
->sg_list
[0].length
= hdr_len
;
1047 mad_send_wr
->sg_list
[0].lkey
= mad_agent
->qp
->pd
->local_dma_lkey
;
1049 /* OPA MADs don't have to be the full 2048 bytes */
1050 if (opa
&& base_version
== OPA_MGMT_BASE_VERSION
&&
1051 data_len
< mad_size
- hdr_len
)
1052 mad_send_wr
->sg_list
[1].length
= data_len
;
1054 mad_send_wr
->sg_list
[1].length
= mad_size
- hdr_len
;
1056 mad_send_wr
->sg_list
[1].lkey
= mad_agent
->qp
->pd
->local_dma_lkey
;
1058 mad_send_wr
->mad_list
.cqe
.done
= ib_mad_send_done
;
1060 mad_send_wr
->send_wr
.wr
.wr_cqe
= &mad_send_wr
->mad_list
.cqe
;
1061 mad_send_wr
->send_wr
.wr
.sg_list
= mad_send_wr
->sg_list
;
1062 mad_send_wr
->send_wr
.wr
.num_sge
= 2;
1063 mad_send_wr
->send_wr
.wr
.opcode
= IB_WR_SEND
;
1064 mad_send_wr
->send_wr
.wr
.send_flags
= IB_SEND_SIGNALED
;
1065 mad_send_wr
->send_wr
.remote_qpn
= remote_qpn
;
1066 mad_send_wr
->send_wr
.remote_qkey
= IB_QP_SET_QKEY
;
1067 mad_send_wr
->send_wr
.pkey_index
= pkey_index
;
1070 ret
= alloc_send_rmpp_list(mad_send_wr
, mad_size
, gfp_mask
);
1073 return ERR_PTR(ret
);
1077 mad_send_wr
->send_buf
.mad_agent
= mad_agent
;
1078 atomic_inc(&mad_agent_priv
->refcount
);
1079 return &mad_send_wr
->send_buf
;
1081 EXPORT_SYMBOL(ib_create_send_mad
);
1083 int ib_get_mad_data_offset(u8 mgmt_class
)
1085 if (mgmt_class
== IB_MGMT_CLASS_SUBN_ADM
)
1086 return IB_MGMT_SA_HDR
;
1087 else if ((mgmt_class
== IB_MGMT_CLASS_DEVICE_MGMT
) ||
1088 (mgmt_class
== IB_MGMT_CLASS_DEVICE_ADM
) ||
1089 (mgmt_class
== IB_MGMT_CLASS_BIS
))
1090 return IB_MGMT_DEVICE_HDR
;
1091 else if ((mgmt_class
>= IB_MGMT_CLASS_VENDOR_RANGE2_START
) &&
1092 (mgmt_class
<= IB_MGMT_CLASS_VENDOR_RANGE2_END
))
1093 return IB_MGMT_VENDOR_HDR
;
1095 return IB_MGMT_MAD_HDR
;
1097 EXPORT_SYMBOL(ib_get_mad_data_offset
);
1099 int ib_is_mad_class_rmpp(u8 mgmt_class
)
1101 if ((mgmt_class
== IB_MGMT_CLASS_SUBN_ADM
) ||
1102 (mgmt_class
== IB_MGMT_CLASS_DEVICE_MGMT
) ||
1103 (mgmt_class
== IB_MGMT_CLASS_DEVICE_ADM
) ||
1104 (mgmt_class
== IB_MGMT_CLASS_BIS
) ||
1105 ((mgmt_class
>= IB_MGMT_CLASS_VENDOR_RANGE2_START
) &&
1106 (mgmt_class
<= IB_MGMT_CLASS_VENDOR_RANGE2_END
)))
1110 EXPORT_SYMBOL(ib_is_mad_class_rmpp
);
1112 void *ib_get_rmpp_segment(struct ib_mad_send_buf
*send_buf
, int seg_num
)
1114 struct ib_mad_send_wr_private
*mad_send_wr
;
1115 struct list_head
*list
;
1117 mad_send_wr
= container_of(send_buf
, struct ib_mad_send_wr_private
,
1119 list
= &mad_send_wr
->cur_seg
->list
;
1121 if (mad_send_wr
->cur_seg
->num
< seg_num
) {
1122 list_for_each_entry(mad_send_wr
->cur_seg
, list
, list
)
1123 if (mad_send_wr
->cur_seg
->num
== seg_num
)
1125 } else if (mad_send_wr
->cur_seg
->num
> seg_num
) {
1126 list_for_each_entry_reverse(mad_send_wr
->cur_seg
, list
, list
)
1127 if (mad_send_wr
->cur_seg
->num
== seg_num
)
1130 return mad_send_wr
->cur_seg
->data
;
1132 EXPORT_SYMBOL(ib_get_rmpp_segment
);
1134 static inline void *ib_get_payload(struct ib_mad_send_wr_private
*mad_send_wr
)
1136 if (mad_send_wr
->send_buf
.seg_count
)
1137 return ib_get_rmpp_segment(&mad_send_wr
->send_buf
,
1138 mad_send_wr
->seg_num
);
1140 return mad_send_wr
->send_buf
.mad
+
1141 mad_send_wr
->send_buf
.hdr_len
;
1144 void ib_free_send_mad(struct ib_mad_send_buf
*send_buf
)
1146 struct ib_mad_agent_private
*mad_agent_priv
;
1147 struct ib_mad_send_wr_private
*mad_send_wr
;
1149 mad_agent_priv
= container_of(send_buf
->mad_agent
,
1150 struct ib_mad_agent_private
, agent
);
1151 mad_send_wr
= container_of(send_buf
, struct ib_mad_send_wr_private
,
1154 free_send_rmpp_list(mad_send_wr
);
1155 kfree(send_buf
->mad
);
1156 deref_mad_agent(mad_agent_priv
);
1158 EXPORT_SYMBOL(ib_free_send_mad
);
1160 int ib_send_mad(struct ib_mad_send_wr_private
*mad_send_wr
)
1162 struct ib_mad_qp_info
*qp_info
;
1163 struct list_head
*list
;
1164 struct ib_send_wr
*bad_send_wr
;
1165 struct ib_mad_agent
*mad_agent
;
1167 unsigned long flags
;
1170 /* Set WR ID to find mad_send_wr upon completion */
1171 qp_info
= mad_send_wr
->mad_agent_priv
->qp_info
;
1172 mad_send_wr
->mad_list
.mad_queue
= &qp_info
->send_queue
;
1173 mad_send_wr
->mad_list
.cqe
.done
= ib_mad_send_done
;
1174 mad_send_wr
->send_wr
.wr
.wr_cqe
= &mad_send_wr
->mad_list
.cqe
;
1176 mad_agent
= mad_send_wr
->send_buf
.mad_agent
;
1177 sge
= mad_send_wr
->sg_list
;
1178 sge
[0].addr
= ib_dma_map_single(mad_agent
->device
,
1179 mad_send_wr
->send_buf
.mad
,
1182 if (unlikely(ib_dma_mapping_error(mad_agent
->device
, sge
[0].addr
)))
1185 mad_send_wr
->header_mapping
= sge
[0].addr
;
1187 sge
[1].addr
= ib_dma_map_single(mad_agent
->device
,
1188 ib_get_payload(mad_send_wr
),
1191 if (unlikely(ib_dma_mapping_error(mad_agent
->device
, sge
[1].addr
))) {
1192 ib_dma_unmap_single(mad_agent
->device
,
1193 mad_send_wr
->header_mapping
,
1194 sge
[0].length
, DMA_TO_DEVICE
);
1197 mad_send_wr
->payload_mapping
= sge
[1].addr
;
1199 spin_lock_irqsave(&qp_info
->send_queue
.lock
, flags
);
1200 if (qp_info
->send_queue
.count
< qp_info
->send_queue
.max_active
) {
1201 ret
= ib_post_send(mad_agent
->qp
, &mad_send_wr
->send_wr
.wr
,
1203 list
= &qp_info
->send_queue
.list
;
1206 list
= &qp_info
->overflow_list
;
1210 qp_info
->send_queue
.count
++;
1211 list_add_tail(&mad_send_wr
->mad_list
.list
, list
);
1213 spin_unlock_irqrestore(&qp_info
->send_queue
.lock
, flags
);
1215 ib_dma_unmap_single(mad_agent
->device
,
1216 mad_send_wr
->header_mapping
,
1217 sge
[0].length
, DMA_TO_DEVICE
);
1218 ib_dma_unmap_single(mad_agent
->device
,
1219 mad_send_wr
->payload_mapping
,
1220 sge
[1].length
, DMA_TO_DEVICE
);
1226 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1227 * with the registered client
1229 int ib_post_send_mad(struct ib_mad_send_buf
*send_buf
,
1230 struct ib_mad_send_buf
**bad_send_buf
)
1232 struct ib_mad_agent_private
*mad_agent_priv
;
1233 struct ib_mad_send_buf
*next_send_buf
;
1234 struct ib_mad_send_wr_private
*mad_send_wr
;
1235 unsigned long flags
;
1238 /* Walk list of send WRs and post each on send list */
1239 for (; send_buf
; send_buf
= next_send_buf
) {
1240 mad_send_wr
= container_of(send_buf
,
1241 struct ib_mad_send_wr_private
,
1243 mad_agent_priv
= mad_send_wr
->mad_agent_priv
;
1245 ret
= ib_mad_enforce_security(mad_agent_priv
,
1246 mad_send_wr
->send_wr
.pkey_index
);
1250 if (!send_buf
->mad_agent
->send_handler
||
1251 (send_buf
->timeout_ms
&&
1252 !send_buf
->mad_agent
->recv_handler
)) {
1257 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr
*) send_buf
->mad
)->mgmt_class
)) {
1258 if (mad_agent_priv
->agent
.rmpp_version
) {
1265 * Save pointer to next work request to post in case the
1266 * current one completes, and the user modifies the work
1267 * request associated with the completion
1269 next_send_buf
= send_buf
->next
;
1270 mad_send_wr
->send_wr
.ah
= send_buf
->ah
;
1272 if (((struct ib_mad_hdr
*) send_buf
->mad
)->mgmt_class
==
1273 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
) {
1274 ret
= handle_outgoing_dr_smp(mad_agent_priv
,
1276 if (ret
< 0) /* error */
1278 else if (ret
== 1) /* locally consumed */
1282 mad_send_wr
->tid
= ((struct ib_mad_hdr
*) send_buf
->mad
)->tid
;
1283 /* Timeout will be updated after send completes */
1284 mad_send_wr
->timeout
= msecs_to_jiffies(send_buf
->timeout_ms
);
1285 mad_send_wr
->max_retries
= send_buf
->retries
;
1286 mad_send_wr
->retries_left
= send_buf
->retries
;
1287 send_buf
->retries
= 0;
1288 /* Reference for work request to QP + response */
1289 mad_send_wr
->refcount
= 1 + (mad_send_wr
->timeout
> 0);
1290 mad_send_wr
->status
= IB_WC_SUCCESS
;
1292 /* Reference MAD agent until send completes */
1293 atomic_inc(&mad_agent_priv
->refcount
);
1294 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
1295 list_add_tail(&mad_send_wr
->agent_list
,
1296 &mad_agent_priv
->send_list
);
1297 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
1299 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv
->agent
)) {
1300 ret
= ib_send_rmpp_mad(mad_send_wr
);
1301 if (ret
>= 0 && ret
!= IB_RMPP_RESULT_CONSUMED
)
1302 ret
= ib_send_mad(mad_send_wr
);
1304 ret
= ib_send_mad(mad_send_wr
);
1306 /* Fail send request */
1307 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
1308 list_del(&mad_send_wr
->agent_list
);
1309 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
1310 atomic_dec(&mad_agent_priv
->refcount
);
1317 *bad_send_buf
= send_buf
;
1320 EXPORT_SYMBOL(ib_post_send_mad
);
1323 * ib_free_recv_mad - Returns data buffers used to receive
1324 * a MAD to the access layer
1326 void ib_free_recv_mad(struct ib_mad_recv_wc
*mad_recv_wc
)
1328 struct ib_mad_recv_buf
*mad_recv_buf
, *temp_recv_buf
;
1329 struct ib_mad_private_header
*mad_priv_hdr
;
1330 struct ib_mad_private
*priv
;
1331 struct list_head free_list
;
1333 INIT_LIST_HEAD(&free_list
);
1334 list_splice_init(&mad_recv_wc
->rmpp_list
, &free_list
);
1336 list_for_each_entry_safe(mad_recv_buf
, temp_recv_buf
,
1338 mad_recv_wc
= container_of(mad_recv_buf
, struct ib_mad_recv_wc
,
1340 mad_priv_hdr
= container_of(mad_recv_wc
,
1341 struct ib_mad_private_header
,
1343 priv
= container_of(mad_priv_hdr
, struct ib_mad_private
,
1348 EXPORT_SYMBOL(ib_free_recv_mad
);
1350 struct ib_mad_agent
*ib_redirect_mad_qp(struct ib_qp
*qp
,
1352 ib_mad_send_handler send_handler
,
1353 ib_mad_recv_handler recv_handler
,
1356 return ERR_PTR(-EINVAL
); /* XXX: for now */
1358 EXPORT_SYMBOL(ib_redirect_mad_qp
);
1360 int ib_process_mad_wc(struct ib_mad_agent
*mad_agent
,
1363 dev_err(&mad_agent
->device
->dev
,
1364 "ib_process_mad_wc() not implemented yet\n");
1367 EXPORT_SYMBOL(ib_process_mad_wc
);
1369 static int method_in_use(struct ib_mad_mgmt_method_table
**method
,
1370 struct ib_mad_reg_req
*mad_reg_req
)
1374 for_each_set_bit(i
, mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
) {
1375 if ((*method
)->agent
[i
]) {
1376 pr_err("Method %d already in use\n", i
);
1383 static int allocate_method_table(struct ib_mad_mgmt_method_table
**method
)
1385 /* Allocate management method table */
1386 *method
= kzalloc(sizeof **method
, GFP_ATOMIC
);
1387 return (*method
) ? 0 : (-ENOMEM
);
1391 * Check to see if there are any methods still in use
1393 static int check_method_table(struct ib_mad_mgmt_method_table
*method
)
1397 for (i
= 0; i
< IB_MGMT_MAX_METHODS
; i
++)
1398 if (method
->agent
[i
])
1404 * Check to see if there are any method tables for this class still in use
1406 static int check_class_table(struct ib_mad_mgmt_class_table
*class)
1410 for (i
= 0; i
< MAX_MGMT_CLASS
; i
++)
1411 if (class->method_table
[i
])
1416 static int check_vendor_class(struct ib_mad_mgmt_vendor_class
*vendor_class
)
1420 for (i
= 0; i
< MAX_MGMT_OUI
; i
++)
1421 if (vendor_class
->method_table
[i
])
1426 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class
*vendor_class
,
1431 for (i
= 0; i
< MAX_MGMT_OUI
; i
++)
1432 /* Is there matching OUI for this vendor class ? */
1433 if (!memcmp(vendor_class
->oui
[i
], oui
, 3))
1439 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table
*vendor
)
1443 for (i
= 0; i
< MAX_MGMT_VENDOR_RANGE2
; i
++)
1444 if (vendor
->vendor_class
[i
])
1450 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table
*method
,
1451 struct ib_mad_agent_private
*agent
)
1455 /* Remove any methods for this mad agent */
1456 for (i
= 0; i
< IB_MGMT_MAX_METHODS
; i
++) {
1457 if (method
->agent
[i
] == agent
) {
1458 method
->agent
[i
] = NULL
;
1463 static int add_nonoui_reg_req(struct ib_mad_reg_req
*mad_reg_req
,
1464 struct ib_mad_agent_private
*agent_priv
,
1467 struct ib_mad_port_private
*port_priv
;
1468 struct ib_mad_mgmt_class_table
**class;
1469 struct ib_mad_mgmt_method_table
**method
;
1472 port_priv
= agent_priv
->qp_info
->port_priv
;
1473 class = &port_priv
->version
[mad_reg_req
->mgmt_class_version
].class;
1475 /* Allocate management class table for "new" class version */
1476 *class = kzalloc(sizeof **class, GFP_ATOMIC
);
1482 /* Allocate method table for this management class */
1483 method
= &(*class)->method_table
[mgmt_class
];
1484 if ((ret
= allocate_method_table(method
)))
1487 method
= &(*class)->method_table
[mgmt_class
];
1489 /* Allocate method table for this management class */
1490 if ((ret
= allocate_method_table(method
)))
1495 /* Now, make sure methods are not already in use */
1496 if (method_in_use(method
, mad_reg_req
))
1499 /* Finally, add in methods being registered */
1500 for_each_set_bit(i
, mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
)
1501 (*method
)->agent
[i
] = agent_priv
;
1506 /* Remove any methods for this mad agent */
1507 remove_methods_mad_agent(*method
, agent_priv
);
1508 /* Now, check to see if there are any methods in use */
1509 if (!check_method_table(*method
)) {
1510 /* If not, release management method table */
1523 static int add_oui_reg_req(struct ib_mad_reg_req
*mad_reg_req
,
1524 struct ib_mad_agent_private
*agent_priv
)
1526 struct ib_mad_port_private
*port_priv
;
1527 struct ib_mad_mgmt_vendor_class_table
**vendor_table
;
1528 struct ib_mad_mgmt_vendor_class_table
*vendor
= NULL
;
1529 struct ib_mad_mgmt_vendor_class
*vendor_class
= NULL
;
1530 struct ib_mad_mgmt_method_table
**method
;
1531 int i
, ret
= -ENOMEM
;
1534 /* "New" vendor (with OUI) class */
1535 vclass
= vendor_class_index(mad_reg_req
->mgmt_class
);
1536 port_priv
= agent_priv
->qp_info
->port_priv
;
1537 vendor_table
= &port_priv
->version
[
1538 mad_reg_req
->mgmt_class_version
].vendor
;
1539 if (!*vendor_table
) {
1540 /* Allocate mgmt vendor class table for "new" class version */
1541 vendor
= kzalloc(sizeof *vendor
, GFP_ATOMIC
);
1545 *vendor_table
= vendor
;
1547 if (!(*vendor_table
)->vendor_class
[vclass
]) {
1548 /* Allocate table for this management vendor class */
1549 vendor_class
= kzalloc(sizeof *vendor_class
, GFP_ATOMIC
);
1553 (*vendor_table
)->vendor_class
[vclass
] = vendor_class
;
1555 for (i
= 0; i
< MAX_MGMT_OUI
; i
++) {
1556 /* Is there matching OUI for this vendor class ? */
1557 if (!memcmp((*vendor_table
)->vendor_class
[vclass
]->oui
[i
],
1558 mad_reg_req
->oui
, 3)) {
1559 method
= &(*vendor_table
)->vendor_class
[
1560 vclass
]->method_table
[i
];
1566 for (i
= 0; i
< MAX_MGMT_OUI
; i
++) {
1567 /* OUI slot available ? */
1568 if (!is_vendor_oui((*vendor_table
)->vendor_class
[
1570 method
= &(*vendor_table
)->vendor_class
[
1571 vclass
]->method_table
[i
];
1572 /* Allocate method table for this OUI */
1574 ret
= allocate_method_table(method
);
1578 memcpy((*vendor_table
)->vendor_class
[vclass
]->oui
[i
],
1579 mad_reg_req
->oui
, 3);
1583 dev_err(&agent_priv
->agent
.device
->dev
, "All OUI slots in use\n");
1587 /* Now, make sure methods are not already in use */
1588 if (method_in_use(method
, mad_reg_req
))
1591 /* Finally, add in methods being registered */
1592 for_each_set_bit(i
, mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
)
1593 (*method
)->agent
[i
] = agent_priv
;
1598 /* Remove any methods for this mad agent */
1599 remove_methods_mad_agent(*method
, agent_priv
);
1600 /* Now, check to see if there are any methods in use */
1601 if (!check_method_table(*method
)) {
1602 /* If not, release management method table */
1609 (*vendor_table
)->vendor_class
[vclass
] = NULL
;
1610 kfree(vendor_class
);
1614 *vendor_table
= NULL
;
1621 static void remove_mad_reg_req(struct ib_mad_agent_private
*agent_priv
)
1623 struct ib_mad_port_private
*port_priv
;
1624 struct ib_mad_mgmt_class_table
*class;
1625 struct ib_mad_mgmt_method_table
*method
;
1626 struct ib_mad_mgmt_vendor_class_table
*vendor
;
1627 struct ib_mad_mgmt_vendor_class
*vendor_class
;
1632 * Was MAD registration request supplied
1633 * with original registration ?
1635 if (!agent_priv
->reg_req
) {
1639 port_priv
= agent_priv
->qp_info
->port_priv
;
1640 mgmt_class
= convert_mgmt_class(agent_priv
->reg_req
->mgmt_class
);
1641 class = port_priv
->version
[
1642 agent_priv
->reg_req
->mgmt_class_version
].class;
1646 method
= class->method_table
[mgmt_class
];
1648 /* Remove any methods for this mad agent */
1649 remove_methods_mad_agent(method
, agent_priv
);
1650 /* Now, check to see if there are any methods still in use */
1651 if (!check_method_table(method
)) {
1652 /* If not, release management method table */
1654 class->method_table
[mgmt_class
] = NULL
;
1655 /* Any management classes left ? */
1656 if (!check_class_table(class)) {
1657 /* If not, release management class table */
1660 agent_priv
->reg_req
->
1661 mgmt_class_version
].class = NULL
;
1667 if (!is_vendor_class(mgmt_class
))
1670 /* normalize mgmt_class to vendor range 2 */
1671 mgmt_class
= vendor_class_index(agent_priv
->reg_req
->mgmt_class
);
1672 vendor
= port_priv
->version
[
1673 agent_priv
->reg_req
->mgmt_class_version
].vendor
;
1678 vendor_class
= vendor
->vendor_class
[mgmt_class
];
1680 index
= find_vendor_oui(vendor_class
, agent_priv
->reg_req
->oui
);
1683 method
= vendor_class
->method_table
[index
];
1685 /* Remove any methods for this mad agent */
1686 remove_methods_mad_agent(method
, agent_priv
);
1688 * Now, check to see if there are
1689 * any methods still in use
1691 if (!check_method_table(method
)) {
1692 /* If not, release management method table */
1694 vendor_class
->method_table
[index
] = NULL
;
1695 memset(vendor_class
->oui
[index
], 0, 3);
1696 /* Any OUIs left ? */
1697 if (!check_vendor_class(vendor_class
)) {
1698 /* If not, release vendor class table */
1699 kfree(vendor_class
);
1700 vendor
->vendor_class
[mgmt_class
] = NULL
;
1701 /* Any other vendor classes left ? */
1702 if (!check_vendor_table(vendor
)) {
1705 agent_priv
->reg_req
->
1706 mgmt_class_version
].
1718 static struct ib_mad_agent_private
*
1719 find_mad_agent(struct ib_mad_port_private
*port_priv
,
1720 const struct ib_mad_hdr
*mad_hdr
)
1722 struct ib_mad_agent_private
*mad_agent
= NULL
;
1723 unsigned long flags
;
1725 spin_lock_irqsave(&port_priv
->reg_lock
, flags
);
1726 if (ib_response_mad(mad_hdr
)) {
1728 struct ib_mad_agent_private
*entry
;
1731 * Routing is based on high 32 bits of transaction ID
1734 hi_tid
= be64_to_cpu(mad_hdr
->tid
) >> 32;
1735 list_for_each_entry(entry
, &port_priv
->agent_list
, agent_list
) {
1736 if (entry
->agent
.hi_tid
== hi_tid
) {
1742 struct ib_mad_mgmt_class_table
*class;
1743 struct ib_mad_mgmt_method_table
*method
;
1744 struct ib_mad_mgmt_vendor_class_table
*vendor
;
1745 struct ib_mad_mgmt_vendor_class
*vendor_class
;
1746 const struct ib_vendor_mad
*vendor_mad
;
1750 * Routing is based on version, class, and method
1751 * For "newer" vendor MADs, also based on OUI
1753 if (mad_hdr
->class_version
>= MAX_MGMT_VERSION
)
1755 if (!is_vendor_class(mad_hdr
->mgmt_class
)) {
1756 class = port_priv
->version
[
1757 mad_hdr
->class_version
].class;
1760 if (convert_mgmt_class(mad_hdr
->mgmt_class
) >=
1761 ARRAY_SIZE(class->method_table
))
1763 method
= class->method_table
[convert_mgmt_class(
1764 mad_hdr
->mgmt_class
)];
1766 mad_agent
= method
->agent
[mad_hdr
->method
&
1767 ~IB_MGMT_METHOD_RESP
];
1769 vendor
= port_priv
->version
[
1770 mad_hdr
->class_version
].vendor
;
1773 vendor_class
= vendor
->vendor_class
[vendor_class_index(
1774 mad_hdr
->mgmt_class
)];
1777 /* Find matching OUI */
1778 vendor_mad
= (const struct ib_vendor_mad
*)mad_hdr
;
1779 index
= find_vendor_oui(vendor_class
, vendor_mad
->oui
);
1782 method
= vendor_class
->method_table
[index
];
1784 mad_agent
= method
->agent
[mad_hdr
->method
&
1785 ~IB_MGMT_METHOD_RESP
];
1791 if (mad_agent
->agent
.recv_handler
)
1792 atomic_inc(&mad_agent
->refcount
);
1794 dev_notice(&port_priv
->device
->dev
,
1795 "No receive handler for client %p on port %d\n",
1796 &mad_agent
->agent
, port_priv
->port_num
);
1801 spin_unlock_irqrestore(&port_priv
->reg_lock
, flags
);
1806 static int validate_mad(const struct ib_mad_hdr
*mad_hdr
,
1807 const struct ib_mad_qp_info
*qp_info
,
1811 u32 qp_num
= qp_info
->qp
->qp_num
;
1813 /* Make sure MAD base version is understood */
1814 if (mad_hdr
->base_version
!= IB_MGMT_BASE_VERSION
&&
1815 (!opa
|| mad_hdr
->base_version
!= OPA_MGMT_BASE_VERSION
)) {
1816 pr_err("MAD received with unsupported base version %d %s\n",
1817 mad_hdr
->base_version
, opa
? "(opa)" : "");
1821 /* Filter SMI packets sent to other than QP0 */
1822 if ((mad_hdr
->mgmt_class
== IB_MGMT_CLASS_SUBN_LID_ROUTED
) ||
1823 (mad_hdr
->mgmt_class
== IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
)) {
1827 /* CM attributes other than ClassPortInfo only use Send method */
1828 if ((mad_hdr
->mgmt_class
== IB_MGMT_CLASS_CM
) &&
1829 (mad_hdr
->attr_id
!= IB_MGMT_CLASSPORTINFO_ATTR_ID
) &&
1830 (mad_hdr
->method
!= IB_MGMT_METHOD_SEND
))
1832 /* Filter GSI packets sent to QP0 */
1841 static int is_rmpp_data_mad(const struct ib_mad_agent_private
*mad_agent_priv
,
1842 const struct ib_mad_hdr
*mad_hdr
)
1844 struct ib_rmpp_mad
*rmpp_mad
;
1846 rmpp_mad
= (struct ib_rmpp_mad
*)mad_hdr
;
1847 return !mad_agent_priv
->agent
.rmpp_version
||
1848 !ib_mad_kernel_rmpp_agent(&mad_agent_priv
->agent
) ||
1849 !(ib_get_rmpp_flags(&rmpp_mad
->rmpp_hdr
) &
1850 IB_MGMT_RMPP_FLAG_ACTIVE
) ||
1851 (rmpp_mad
->rmpp_hdr
.rmpp_type
== IB_MGMT_RMPP_TYPE_DATA
);
1854 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private
*wr
,
1855 const struct ib_mad_recv_wc
*rwc
)
1857 return ((struct ib_mad_hdr
*)(wr
->send_buf
.mad
))->mgmt_class
==
1858 rwc
->recv_buf
.mad
->mad_hdr
.mgmt_class
;
1861 static inline int rcv_has_same_gid(const struct ib_mad_agent_private
*mad_agent_priv
,
1862 const struct ib_mad_send_wr_private
*wr
,
1863 const struct ib_mad_recv_wc
*rwc
)
1865 struct rdma_ah_attr attr
;
1866 u8 send_resp
, rcv_resp
;
1868 struct ib_device
*device
= mad_agent_priv
->agent
.device
;
1869 u8 port_num
= mad_agent_priv
->agent
.port_num
;
1873 send_resp
= ib_response_mad((struct ib_mad_hdr
*)wr
->send_buf
.mad
);
1874 rcv_resp
= ib_response_mad(&rwc
->recv_buf
.mad
->mad_hdr
);
1876 if (send_resp
== rcv_resp
)
1877 /* both requests, or both responses. GIDs different */
1880 if (rdma_query_ah(wr
->send_buf
.ah
, &attr
))
1881 /* Assume not equal, to avoid false positives. */
1884 has_grh
= !!(rdma_ah_get_ah_flags(&attr
) & IB_AH_GRH
);
1885 if (has_grh
!= !!(rwc
->wc
->wc_flags
& IB_WC_GRH
))
1886 /* one has GID, other does not. Assume different */
1889 if (!send_resp
&& rcv_resp
) {
1890 /* is request/response. */
1892 if (ib_get_cached_lmc(device
, port_num
, &lmc
))
1894 return (!lmc
|| !((rdma_ah_get_path_bits(&attr
) ^
1895 rwc
->wc
->dlid_path_bits
) &
1898 const struct ib_global_route
*grh
=
1899 rdma_ah_read_grh(&attr
);
1901 if (ib_get_cached_gid(device
, port_num
,
1902 grh
->sgid_index
, &sgid
, NULL
))
1904 return !memcmp(sgid
.raw
, rwc
->recv_buf
.grh
->dgid
.raw
,
1910 return rdma_ah_get_dlid(&attr
) == rwc
->wc
->slid
;
1912 return !memcmp(rdma_ah_read_grh(&attr
)->dgid
.raw
,
1913 rwc
->recv_buf
.grh
->sgid
.raw
,
1917 static inline int is_direct(u8
class)
1919 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
);
1922 struct ib_mad_send_wr_private
*
1923 ib_find_send_mad(const struct ib_mad_agent_private
*mad_agent_priv
,
1924 const struct ib_mad_recv_wc
*wc
)
1926 struct ib_mad_send_wr_private
*wr
;
1927 const struct ib_mad_hdr
*mad_hdr
;
1929 mad_hdr
= &wc
->recv_buf
.mad
->mad_hdr
;
1931 list_for_each_entry(wr
, &mad_agent_priv
->wait_list
, agent_list
) {
1932 if ((wr
->tid
== mad_hdr
->tid
) &&
1933 rcv_has_same_class(wr
, wc
) &&
1935 * Don't check GID for direct routed MADs.
1936 * These might have permissive LIDs.
1938 (is_direct(mad_hdr
->mgmt_class
) ||
1939 rcv_has_same_gid(mad_agent_priv
, wr
, wc
)))
1940 return (wr
->status
== IB_WC_SUCCESS
) ? wr
: NULL
;
1944 * It's possible to receive the response before we've
1945 * been notified that the send has completed
1947 list_for_each_entry(wr
, &mad_agent_priv
->send_list
, agent_list
) {
1948 if (is_rmpp_data_mad(mad_agent_priv
, wr
->send_buf
.mad
) &&
1949 wr
->tid
== mad_hdr
->tid
&&
1951 rcv_has_same_class(wr
, wc
) &&
1953 * Don't check GID for direct routed MADs.
1954 * These might have permissive LIDs.
1956 (is_direct(mad_hdr
->mgmt_class
) ||
1957 rcv_has_same_gid(mad_agent_priv
, wr
, wc
)))
1958 /* Verify request has not been canceled */
1959 return (wr
->status
== IB_WC_SUCCESS
) ? wr
: NULL
;
1964 void ib_mark_mad_done(struct ib_mad_send_wr_private
*mad_send_wr
)
1966 mad_send_wr
->timeout
= 0;
1967 if (mad_send_wr
->refcount
== 1)
1968 list_move_tail(&mad_send_wr
->agent_list
,
1969 &mad_send_wr
->mad_agent_priv
->done_list
);
1972 static void ib_mad_complete_recv(struct ib_mad_agent_private
*mad_agent_priv
,
1973 struct ib_mad_recv_wc
*mad_recv_wc
)
1975 struct ib_mad_send_wr_private
*mad_send_wr
;
1976 struct ib_mad_send_wc mad_send_wc
;
1977 unsigned long flags
;
1980 INIT_LIST_HEAD(&mad_recv_wc
->rmpp_list
);
1981 ret
= ib_mad_enforce_security(mad_agent_priv
,
1982 mad_recv_wc
->wc
->pkey_index
);
1984 ib_free_recv_mad(mad_recv_wc
);
1985 deref_mad_agent(mad_agent_priv
);
1989 list_add(&mad_recv_wc
->recv_buf
.list
, &mad_recv_wc
->rmpp_list
);
1990 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv
->agent
)) {
1991 mad_recv_wc
= ib_process_rmpp_recv_wc(mad_agent_priv
,
1994 deref_mad_agent(mad_agent_priv
);
1999 /* Complete corresponding request */
2000 if (ib_response_mad(&mad_recv_wc
->recv_buf
.mad
->mad_hdr
)) {
2001 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
2002 mad_send_wr
= ib_find_send_mad(mad_agent_priv
, mad_recv_wc
);
2004 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
2005 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv
->agent
)
2006 && ib_is_mad_class_rmpp(mad_recv_wc
->recv_buf
.mad
->mad_hdr
.mgmt_class
)
2007 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad
*)mad_recv_wc
->recv_buf
.mad
)->rmpp_hdr
)
2008 & IB_MGMT_RMPP_FLAG_ACTIVE
)) {
2009 /* user rmpp is in effect
2010 * and this is an active RMPP MAD
2012 mad_agent_priv
->agent
.recv_handler(
2013 &mad_agent_priv
->agent
, NULL
,
2015 atomic_dec(&mad_agent_priv
->refcount
);
2017 /* not user rmpp, revert to normal behavior and
2019 ib_free_recv_mad(mad_recv_wc
);
2020 deref_mad_agent(mad_agent_priv
);
2024 ib_mark_mad_done(mad_send_wr
);
2025 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
2027 /* Defined behavior is to complete response before request */
2028 mad_agent_priv
->agent
.recv_handler(
2029 &mad_agent_priv
->agent
,
2030 &mad_send_wr
->send_buf
,
2032 atomic_dec(&mad_agent_priv
->refcount
);
2034 mad_send_wc
.status
= IB_WC_SUCCESS
;
2035 mad_send_wc
.vendor_err
= 0;
2036 mad_send_wc
.send_buf
= &mad_send_wr
->send_buf
;
2037 ib_mad_complete_send_wr(mad_send_wr
, &mad_send_wc
);
2040 mad_agent_priv
->agent
.recv_handler(&mad_agent_priv
->agent
, NULL
,
2042 deref_mad_agent(mad_agent_priv
);
2048 static enum smi_action
handle_ib_smi(const struct ib_mad_port_private
*port_priv
,
2049 const struct ib_mad_qp_info
*qp_info
,
2050 const struct ib_wc
*wc
,
2052 struct ib_mad_private
*recv
,
2053 struct ib_mad_private
*response
)
2055 enum smi_forward_action retsmi
;
2056 struct ib_smp
*smp
= (struct ib_smp
*)recv
->mad
;
2058 if (smi_handle_dr_smp_recv(smp
,
2059 rdma_cap_ib_switch(port_priv
->device
),
2061 port_priv
->device
->phys_port_cnt
) ==
2063 return IB_SMI_DISCARD
;
2065 retsmi
= smi_check_forward_dr_smp(smp
);
2066 if (retsmi
== IB_SMI_LOCAL
)
2067 return IB_SMI_HANDLE
;
2069 if (retsmi
== IB_SMI_SEND
) { /* don't forward */
2070 if (smi_handle_dr_smp_send(smp
,
2071 rdma_cap_ib_switch(port_priv
->device
),
2072 port_num
) == IB_SMI_DISCARD
)
2073 return IB_SMI_DISCARD
;
2075 if (smi_check_local_smp(smp
, port_priv
->device
) == IB_SMI_DISCARD
)
2076 return IB_SMI_DISCARD
;
2077 } else if (rdma_cap_ib_switch(port_priv
->device
)) {
2078 /* forward case for switches */
2079 memcpy(response
, recv
, mad_priv_size(response
));
2080 response
->header
.recv_wc
.wc
= &response
->header
.wc
;
2081 response
->header
.recv_wc
.recv_buf
.mad
= (struct ib_mad
*)response
->mad
;
2082 response
->header
.recv_wc
.recv_buf
.grh
= &response
->grh
;
2084 agent_send_response((const struct ib_mad_hdr
*)response
->mad
,
2087 smi_get_fwd_port(smp
),
2088 qp_info
->qp
->qp_num
,
2092 return IB_SMI_DISCARD
;
2094 return IB_SMI_HANDLE
;
static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
			if (recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			    recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				*resp_len = opa_get_smp_header_size(
							(struct opa_smp *)recv->mad);
			else
				*resp_len = sizeof(struct ib_mad_hdr);
		}

		return true;
	} else {
		return false;
	}
}

static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}

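/*
 * Dispatch an incoming SMP to the OPA or IB SMI handler based on the
 * base and class versions carried in the MAD header.
 */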
static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SM_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}

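/*
 * Completion handler for receive work requests posted on the MAD QPs:
 * unmap the buffer, validate the MAD, run SMI processing for directed-route
 * SMPs, give the driver's process_mad() a chance to consume or answer the
 * MAD, and finally hand it to the matching agent (or generate an
 * "unsupported method" response).  A replacement receive WR is reposted
 * before returning.
 */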
static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;
	}

	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response)
		goto out;

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}

static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}

static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}

/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

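/*
 * Completion handler for send work requests: unmap the send buffers,
 * promote any overflow-list entry into the hardware send queue, report
 * the completion to the owning agent, and repost a queued send if one
 * was waiting for room on the QP.
 */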
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		if (!ib_mad_send_error(port_priv, wc))
			return;
	}

	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					   &bad_send_wr);
			if (!ret)
				return false;
		}
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"%s - ib_modify_qp to RTS: %d\n",
					__func__, ret);
			else
				mark_sends_for_retry(qp_info);
		}
	}

	return true;
}

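/*
 * Flush every outstanding send for an agent that is going away: mark
 * active sends as flushed and report all requests on the wait list back
 * to the client with IB_WC_WR_FLUSH_ERR.
 */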
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);

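/*
 * Work handler for MADs that were routed locally (loopback to the same
 * port): build a synthetic receive completion for the destination agent,
 * then report the send completion to the originating agent.
 */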
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;

			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     local->mad_send_wr->send_wr.wr.wr_cqe,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_send_wr->send_buf,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

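/*
 * Resend a request whose response timer expired, consuming one retry and
 * putting the work request back on the agent's send list on success.
 */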
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}

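/*
 * Delayed-work handler that walks the agent's wait list, retries sends
 * whose response timer has expired, and reports IB_WC_RESP_TIMEOUT_ERR
 * for requests that have exhausted their retries.
 */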
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		mad_priv->header.mad_list.mad_queue = recv_queue;
		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}

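/*
 * Start the port: move QP0/QP1 through INIT, RTR and RTS, request CQ
 * notifications, and post the initial set of receive MADs.
 */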
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}

static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr	qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv)
		return -ENOMEM;

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dealloc_pd(port_priv->pd);
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

static void ib_mad_init_device(struct ib_device *device)
{
	int start, i;

	start = rdma_start_port(device);

	for (i = start; i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

int ib_mad_init(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}

void ib_mad_cleanup(void)
{
	ib_unregister_client(&mad_client);
}