/*
 * Copyright (c) 2004-2007 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?

static int get_spl_qp_index(enum ib_qp_type qp_type)

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])

static int is_vendor_method_in_use(
			struct ib_mad_mgmt_vendor_class *vendor_class,
			struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method_in_use(&method, mad_reg_req))

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",

	/* Validate MAD registration request if supplied */
	if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid Class Version %u\n",
			   mad_reg_req->mgmt_class_version);
		dev_notice(&device->dev,
			   "ib_register_mad_agent: no recv_handler\n");
	if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
		/*
		 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
		 * one in this range currently allowed
		 */
		if (mad_reg_req->mgmt_class !=
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
				   mad_reg_req->mgmt_class);
	} else if (mad_reg_req->mgmt_class == 0) {
		/*
		 * Class 0 is reserved in IBA and is used for
		 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
		 */
		dev_notice(&device->dev,
			   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
	} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
		/*
		 * If class is in "new" vendor range,
		 * ensure supplied OUI is not zero
		 */
		if (!is_vendor_oui(mad_reg_req->oui)) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
				   mad_reg_req->mgmt_class);
	/* Make sure class supplied is consistent with RMPP */
	if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
			   mad_reg_req->mgmt_class);

	/* Make sure class supplied is consistent with QP type */
	if (qp_type == IB_QPT_SMI) {
		if ((mad_reg_req->mgmt_class !=
		     IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
		    (mad_reg_req->mgmt_class !=
		     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
				   mad_reg_req->mgmt_class);
		if ((mad_reg_req->mgmt_class ==
		     IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
		    (mad_reg_req->mgmt_class ==
		     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
				   mad_reg_req->mgmt_class);
	/* No registration request supplied */
		if (registration_flags & IB_MAD_USER_RMPP)

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);

	/* Verify the QP requested is supported.  For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);

		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
			ret = ERR_PTR(-ENOMEM);

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->mgmt_class_version].class;
				method = class->method_table[mgmt_class];
					if (method_in_use(&method,
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->mgmt_class_version].vendor;
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
					if (is_vendor_method_in_use(
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}
EXPORT_SYMBOL(ib_register_mad_agent);

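/*
 * Snoop agents observe MADs flowing through a QP without consuming them.
 * The helpers below test which snoop events a client registered for.
 */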
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		   IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])

	if (i == qp_info->snoop_table_size) {
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
		if (!new_snoop_table) {

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   enum ib_qp_type qp_type,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);

	qpn = get_spl_qp_index(qp_type);
		ret = ERR_PTR(-EINVAL);

	port_priv = ib_get_mad_port(device, port_num);
		ret = ERR_PTR(-ENODEV);

	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

	kfree(mad_snoop_priv);
}
EXPORT_SYMBOL(ib_register_mad_snoop);

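/*
 * Agent teardown relies on reference counting: the final deref completes
 * ->comp, letting unregister_mad_agent()/unregister_mad_snoop() wait until
 * all outstanding work that references the agent has finished.
 */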
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
		unregister_mad_snoop(mad_snoop_priv);
	}
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

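/* Unlink a posted MAD from its send/receive queue under the queue lock. */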
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

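/*
 * snoop_send()/snoop_recv() hand each completion to every registered snoop
 * agent whose flags match, dropping the snoop lock around the callback.
 */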
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
{
	struct ib_mad_snoop_private *mad_snoop_priv;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
{
	struct ib_mad_snoop_private *mad_snoop_priv;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

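/*
 * Build a synthetic receive work completion for a directed-route SMP that
 * is processed locally rather than being posted to the wire.
 */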
static void build_smp_wc(struct ib_qp *qp,
			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
{
	memset(wc, 0, sizeof *wc);
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

		ret->mad_size = mad_size;

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);

	if (device->node_type == RDMA_NODE_IB_SWITCH &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->wr.ud.port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
	    smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
		dev_err(&device->dev, "Invalid directed route\n");

	/* Check to post send on QP or process locally */
	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)

	local = kmalloc(sizeof *local, GFP_ATOMIC);
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
		dev_err(&device->dev, "No memory for local response MAD\n");

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad *)smp,
				  (struct ib_mad *)mad_priv->mad);
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
					(const struct ib_mad_hdr *)mad_priv->mad);
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);

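/*
 * MAD payloads are padded to a whole number of RMPP segments; get_pad_size()
 * returns how many pad bytes are needed for the given header/data lengths.
 */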
static int get_pad_size(int hdr_len, int data_len)
{
	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof(struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int hdr_len, int data_len,
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > sizeof(struct ib_mad))
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > sizeof(struct ib_mad))
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)

	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

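/*
 * Map the MAD header and payload for DMA, then post the send work request,
 * or park it on the overflow list when the send queue is already full.
 */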
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	unsigned long flags;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);

	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
		list = &qp_info->send_queue.list;
	} else {
		list = &qp_info->overflow_list;

	qp_info->send_queue.count++;
	list_add_tail(&mad_send_wr->mad_list.list, list);

	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);

	ib_dma_unmap_single(mad_agent->device,
			    mad_send_wr->header_mapping,
			    sge[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_agent->device,
			    mad_send_wr->payload_mapping,
			    sge[1].length, DMA_TO_DEVICE);

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {
		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
			if (ret < 0)		/* error */
			else if (ret == 1)	/* locally consumed */

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);

			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);

		*bad_send_buf = send_buf;
}
EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
	return ERR_PTR(-EINVAL);	/* XXX: for now */
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
		pr_err("No memory for ib_mad_mgmt_method_table\n");

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;

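/*
 * add_nonoui_reg_req()/add_oui_reg_req() wire a new agent into the per-port
 * class/method dispatch tables that find_mad_agent() consults on receive.
 */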
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_class_table\n");

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))

		method = &(*class)->method_table[mgmt_class];
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class_table\n");

		*vendor_table = vendor;
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class\n");

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
	(*vendor_table)->vendor_class[vclass] = NULL;
	kfree(vendor_class);
	*vendor_table = NULL;

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;

	method = class->method_table[mgmt_class];
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;

	if (!is_vendor_class(mgmt_class))

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	vendor_class = vendor->vendor_class[mgmt_class];
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
			method = vendor_class->method_table[index];
				/* Remove any methods for this mad agent */
				remove_methods_mad_agent(method, agent_priv);
				/*
				 * Now, check to see if there are
				 * any methods still in use
				 */
				if (!check_method_table(method)) {
					/* If not, release management method table */
					vendor_class->method_table[index] = NULL;
					memset(vendor_class->oui[index], 0, 3);
					/* Any OUIs left ? */
					if (!check_vendor_class(vendor_class)) {
						/* If not, release vendor class table */
						kfree(vendor_class);
						vendor->vendor_class[mgmt_class] = NULL;
						/* Any other vendor classes left ? */
						if (!check_vendor_table(vendor)) {
							agent_priv->reg_req->
							mgmt_class_version].

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad_hdr)) {
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    IB_MGMT_MAX_METHODS)
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			method = vendor_class->method_table[index];
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];

		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
			dev_notice(&port_priv->device->dev,
				   "No receive handler for client %p on port %d\n",
				   &mad_agent->agent, port_priv->port_num);

	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

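/*
 * Basic sanity checks on an incoming MAD: the base version must be
 * understood and SMI/GSI classes must arrive on the appropriate special QP.
 */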
static int validate_mad(const struct ib_mad_hdr *mad_hdr, u32 qp_num)
{
	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION) {
		pr_err("MAD received with unsupported base version %d\n",
		       mad_hdr->base_version);

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {

	/* Filter GSI packets sent to QP0 */

static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
		  IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc)
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not. Assume different */

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
		if (ib_get_cached_gid(device, port_num,
				      attr.grh.sgid_index, &sgid))
		return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}

struct ib_mad_send_wr_private *
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->done_list);
}

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
			deref_mad_agent(mad_agent_priv);

	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/* user rmpp is in effect
				 * and this is an active RMPP MAD
				 */
				mad_recv_wc->wc->wr_id = 0;
				mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
				atomic_dec(&mad_agent_priv->refcount);
				/* not user rmpp, revert to normal behavior and
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
			mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
			atomic_dec(&mad_agent_priv->refcount);

			mad_send_wc.status = IB_WC_SUCCESS;
			mad_send_wc.vendor_err = 0;
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
		deref_mad_agent(mad_agent_priv);

static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	if (smi_handle_dr_smp_recv(smp,
				   port_priv->device->node_type,
				   port_priv->device->phys_port_cnt) ==
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
					   port_priv->device->node_type,
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    response->mad_size);

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}

static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info->qp->qp_num))
		goto out;

	response = alloc_mad_private(recv->mad_size, GFP_ATOMIC);
	if (!response) {
		dev_err(&port_priv->device->dev,
			"ib_mad_recv_done_handler no memory for response buffer\n");
		goto out;
	}

	if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_ib_smi(port_priv, qp_info, wc, port_num, recv,
				  response) == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad *)recv->mad,
						     (struct ib_mad *)response->mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    response->mad_size);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, response->mad_size);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
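
/*
 * Illustrative sketch (added, not part of the original source): a client's
 * recv_handler is handed the ib_mad_recv_wc populated above.  Assuming a
 * hypothetical agent whose handler only inspects the MAD header, it might
 * look like:
 *
 *	static void example_recv_handler(struct ib_mad_agent *agent,
 *					 struct ib_mad_recv_wc *mad_recv_wc)
 *	{
 *		const struct ib_mad_hdr *hdr =
 *			&mad_recv_wc->recv_buf.mad->mad_hdr;
 *
 *		pr_debug("MAD class 0x%x method 0x%x\n",
 *			 hdr->mgmt_class, hdr->method);
 *		ib_free_recv_mad(mad_recv_wc);
 *	}
 */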
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						      struct ib_mad_send_wr_private,
						      agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					      struct ib_mad_send_wr_private,
					      mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					   &bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"mad_error_handler - ib_modify_qp to RTS : %d\n",
					ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}
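
/*
 * Note (added commentary): a non-flush send error leaves the QP in the SQE
 * state.  The handler above moves it back to RTS with IB_QP_CUR_STATE so
 * the sends flagged by mark_sends_for_retry() can be reposted instead of
 * being failed back to their clients.
 */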
/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(struct work_struct *work)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = container_of(work, struct ib_mad_port_private, work);
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
static struct ib_mad_send_wr_private *
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
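
/*
 * Illustrative sketch (added, not part of the original source): a client
 * that has posted a send buffer with ib_post_send_mad() can extend or cancel
 * the pending request.  "agent" and "send_buf" below are assumed to be the
 * client's own handles:
 *
 *	if (ib_modify_mad(agent, send_buf, 5000))
 *		pr_debug("request already completed\n");
 *
 *	ib_cancel_mad(agent, send_buf);
 *
 * The first call gives the responder another 5 seconds; the second drops the
 * request, which then completes with IB_WC_WR_FLUSH_ERR.
 */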
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     (unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     0, recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	if (!list_empty(&port_priv->port_list))
		queue_work(port_priv->wq, &port_priv->work);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				dev_err(&qp_info->port_priv->device->dev,
					"No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		dev_err(&port_priv->device->dev,
			"Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}
/*
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;
	struct ib_cq_init_attr cq_attr = {};

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	cq_attr.cqe = cq_size;
	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, &cq_attr);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler);

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}
/*
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
static void ib_mad_init_device(struct ib_device *device)
{
	int start, end, i;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		start = 0;
		end   = 0;
	} else {
		start = 1;
		end   = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}
static void ib_mad_remove_device(struct ib_device *device)
{
	int start, end, i;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		start = 0;
		end   = 0;
	} else {
		start = 1;
		end   = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}
static struct ib_client mad_client = {
	.name   = "mad",
	.add    = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

static int __init ib_mad_init_module(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}

static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);
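
/*
 * Note (added commentary): ib_mad_init_module() clamps the module-parameter
 * queue sizes to the [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE] range.  For
 * example, a requested send queue smaller than IB_MAD_QP_MIN_SIZE is raised
 * to that minimum, and a request above IB_MAD_QP_MAX_SIZE is lowered to the
 * cap, before the values are used for QP and CQ sizing in ib_mad_port_open().
 */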