/*
 * Copyright (c) 2004-2007 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}
static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}
int ib_response_mad(struct ib_mad *mad)
{
	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
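
/*
 * Illustrative note (not part of the original source): a client's receive
 * handler can use ib_response_mad() to tell solicited replies and trap
 * represses apart from unsolicited requests before deciding whether to match
 * them against an outstanding send.  A minimal sketch with a hypothetical
 * handler:
 *
 *	static void my_recv_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_recv_wc *wc)
 *	{
 *		if (ib_response_mad(wc->recv_buf.mad))
 *			;	// handle a response to one of our requests
 *		else
 *			;	// handle an incoming request
 *		ib_free_recv_mad(wc);
 *	}
 */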
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1)
		goto error1;

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
		goto error1;

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
			goto error1;
		if (!recv_handler)
			goto error1;
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				goto error1;
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
				goto error1;
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version)
				goto error1;
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
		/* Make a copy of the MAD registration request */
		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
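
/*
 * Illustrative usage sketch (not part of the original file): registering a
 * GSI agent for SA-class MADs.  The handler names and context pointer are
 * hypothetical and error handling is abbreviated.
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class	    = IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version = 2,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &reg_req,
 *				      0, my_send_handler, my_recv_handler,
 *				      my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */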
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void build_smp_wc(struct ib_qp *qp,
			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	if (device->node_type == RDMA_NODE_IB_SWITCH &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->wr.ud.port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
	     IB_LID_PERMISSIVE &&
	     smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
	     IB_SMI_DISCARD) {
		ret = -EINVAL;
		printk(KERN_ERR PFX "Invalid directed route\n");
		goto out;
	}

	/* Check to post send on QP or process locally */
	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kmem_cache_free(ib_mad_cache, mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}
static int get_pad_size(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}
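
/*
 * Worked example (annotation, not original code): with a 256-byte MAD and
 * the 56-byte SA class header, seg_size is 200.  A 300-byte payload then
 * needs pad = 200 - (300 % 200) = 100 so the last RMPP segment is full,
 * while a 400-byte payload divides evenly and gets no padding.
 */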
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
			       "alloc failed for len %zd, gfp %#x\n",
			       sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
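
/*
 * Illustrative usage sketch (not part of the original file): allocating a
 * single (non-RMPP) SA-sized send buffer on an agent obtained from
 * ib_register_mad_agent(); ah, retry and timeout values are hypothetical.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, 1, 0, 0, IB_MGMT_SA_HDR,
 *				 IB_MGMT_SA_DATA, GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah		= ah;
 *	msg->retries	= 3;
 *	msg->timeout_ms	= 1000;
 */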
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);
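
/*
 * Annotation (not original code): the values returned above are the class
 * header lengths defined in <rdma/ib_mad.h>, so a receiver can locate the
 * payload of a single MAD with something like:
 *
 *	u8 *data = (u8 *)mad +
 *		   ib_get_mad_data_offset(mad->mad_hdr.mgmt_class);
 */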
int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_agent_priv->agent.rmpp_version) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
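
/*
 * Illustrative usage sketch (not part of the original file): posting a send
 * buffer built with ib_create_send_mad().  msg is hypothetical; on success
 * it is released later from the client's send handler instead.
 *
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret)
 *		ib_free_send_mad(msg);
 */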
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kmem_cache_free(ib_mad_cache, priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}
/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class\n");
			goto error2;
		}

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	printk(KERN_ERR PFX "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       struct ib_mad *mad)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			if (!class)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad->mad_hdr.mgmt_class)];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			printk(KERN_NOTICE PFX "No receive handler for client "
			       "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}
static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}
static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}
static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
				     struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}
static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
				   struct ib_mad_send_wr_private *wr,
				   struct ib_mad_recv_wc *rwc)
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;

	send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(rwc->recv_buf.mad);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not.  Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	else
		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
			       16);
}
static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}
struct ib_mad_send_wr_private*
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
		 struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	struct ib_mad *mad;

	mad = (struct ib_mad *)wc->recv_buf.mad;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad->mad_hdr.tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad->mad_hdr.tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->done_list);
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (mad_agent_priv->agent.rmpp_version) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(mad_recv_wc);
			deref_mad_agent(mad_agent_priv);
			return;
		}
		ib_mark_mad_done(mad_send_wr);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;
	int port_num;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    sizeof(struct ib_mad_private) -
			      sizeof(struct ib_mad_private_header),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
		goto out;

	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
	if (!response) {
		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
		       "for response buffer\n");
		goto out;
	}

	if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		enum smi_forward_action retsmi;

		if (smi_handle_dr_smp_recv(&recv->mad.smp,
					   port_priv->device->node_type,
					   port_num,
					   port_priv->device->phys_port_cnt) ==
					   IB_SMI_DISCARD)
			goto out;

		retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
		if (retsmi == IB_SMI_LOCAL)
			goto local;

		if (retsmi == IB_SMI_SEND) { /* don't forward */
			if (smi_handle_dr_smp_send(&recv->mad.smp,
						   port_priv->device->node_type,
						   port_num) == IB_SMI_DISCARD)
				goto out;

			if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
				goto out;
		} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
			/* forward case for switches */
			memcpy(response, recv, sizeof(*response));
			response->header.recv_wc.wc = &response->header.wc;
			response->header.recv_wc.recv_buf.mad = &response->mad.mad;
			response->header.recv_wc.recv_buf.grh = &response->grh;

			agent_send_response(&response->mad.mad,
					    &response->grh, wc,
					    port_priv->device,
					    smi_get_fwd_port(&recv->mad.smp),
					    qp_info->qp->qp_num);

			goto out;
		}
	}

local:
	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		int ret;

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     &recv->mad.mad,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response(&response->mad.mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		if (recv)
			kmem_cache_free(ib_mad_cache, recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		__cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			__cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
		}
	}
}
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	}
	else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
		__cancel_delayed_work(&mad_agent_priv->timed_work);
		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				   &mad_agent_priv->timed_work, delay);
	}
}
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_agent_priv->agent.rmpp_version) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
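
/*
 * Flag every send still posted on the QP so that, once the QP has been
 * moved back to RTS after an SQE transition, the flushed work requests
 * are reposted rather than completed in error.
 */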
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					   &bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				printk(KERN_ERR PFX "mad_error_handler - "
				       "ib_modify_qp to RTS : %d\n", ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}
/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(struct work_struct *work)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = container_of(work, struct ib_mad_port_private, work);
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}
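
/*
 * Flush all outstanding sends for an agent that is being torn down; each
 * request is reported back to the client with IB_WC_WR_FLUSH_ERR status.
 */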
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
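
/*
 * Find the send WR backing a client send buffer, checking requests that
 * are waiting for a response before those still on the send list.
 */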
static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
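
/*
 * Complete MADs that were handled locally (for example SMPs addressed to
 * the local port): deliver the locally generated response to the
 * destination agent's receive handler, then report the send completion
 * back to the sender.
 */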
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     (unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     0, recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kmem_cache_free(ib_mad_cache, local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
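
/*
 * Attempt to resend a request whose response timer expired; returns
 * non-zero when no retries remain (or the resend fails), in which case
 * the request is completed with a timeout error instead.
 */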
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	if (!list_empty(&port_priv->port_list))
		queue_work(port_priv->wq, &port_priv->work);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 sizeof *mad_priv -
						   sizeof mad_priv->header,
						 DMA_FROM_DEVICE);
		mad_priv->header.mapping = sg_list.addr;
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    sizeof *mad_priv -
					      sizeof mad_priv->header,
					    DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    sizeof(struct ib_mad_private) -
				      sizeof(struct ib_mad_private_header),
				    DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}
/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = 0;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "INIT: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTR: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTS: %d\n", i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		printk(KERN_ERR PFX "Failed to request completion "
		       "notification: %d\n", ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
	       event->event, qp_info->qp->qp_num);
}
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}
static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
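
/*
 * Create one of the special QPs (SMI or GSI) on the port's shared CQ;
 * send and receive queue depths come from the module parameters.
 */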
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}
/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = (mad_sendq_size + mad_recvq_size) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size, 0);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
	if (ret)
		goto error6;
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler);

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}
/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
static void ib_mad_init_device(struct ib_device *device)
{
	int start, end, i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		start = 0;
		end   = 0;
	} else {
		start = 1;
		end   = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (ib_mad_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
			       device->name, i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d "
			       "for agents\n",
			       device->name, i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		printk(KERN_ERR PFX "Couldn't close %s port %d\n",
		       device->name, i);

error:
	i--;

	while (i >= start) {
		if (ib_agent_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, i);
		if (ib_mad_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, i);
		i--;
	}
}
static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
	}
}
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
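
/*
 * Module initialization: clamp the queue-size module parameters to the
 * supported range, create the MAD slab cache, and register with the IB
 * core as a client so ports are opened as devices are added.
 */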
static int __init ib_mad_init_module(void)
{
	int ret;

	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}
static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);
	kmem_cache_destroy(ib_mad_cache);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);