git.proxmox.com - mirror_ubuntu-zesty-kernel.git - blame: drivers/infiniband/core/mad.c
IB/core: Add OPA MAD core capability flag
1da177e4 1/*
de493d47 2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
b76aabc3 5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
1da177e4 35 */
36
37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
1da177e4 39#include <linux/dma-mapping.h>
5a0e3ad6 40#include <linux/slab.h>
e4dd23d7 41#include <linux/module.h>
9874e746 42#include <rdma/ib_cache.h>
43
44#include "mad_priv.h"
fa619a77 45#include "mad_rmpp.h"
46#include "smi.h"
47#include "agent.h"
48
49MODULE_LICENSE("Dual BSD/GPL");
50MODULE_DESCRIPTION("kernel IB MAD API");
51MODULE_AUTHOR("Hal Rosenstock");
52MODULE_AUTHOR("Sean Hefty");
53
54static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
55static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
56
57module_param_named(send_queue_size, mad_sendq_size, int, 0444);
58MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
59module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
60MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
61
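/*
 * Usage note (editor's sketch, not part of the upstream source): both
 * parameters are read-only at runtime (permission 0444), so they are set at
 * module load time, for example via a modprobe options file. The module name
 * and the values below are assumptions used only for illustration:
 *
 *	options ib_mad send_queue_size=256 recv_queue_size=1024
 *
 * The defaults come from IB_MAD_QP_SEND_SIZE and IB_MAD_QP_RECV_SIZE in
 * mad_priv.h.
 */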
62static struct list_head ib_mad_port_list;
63static u32 ib_mad_client_id = 0;
64
65/* Port list lock */
6276e08a 66static DEFINE_SPINLOCK(ib_mad_port_list_lock);
67
68/* Forward declarations */
69static int method_in_use(struct ib_mad_mgmt_method_table **method,
70 struct ib_mad_reg_req *mad_reg_req);
71static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
72static struct ib_mad_agent_private *find_mad_agent(
73 struct ib_mad_port_private *port_priv,
d94bd266 74 const struct ib_mad_hdr *mad);
75static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
76 struct ib_mad_private *mad);
77static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
78static void timeout_sends(struct work_struct *work);
79static void local_completions(struct work_struct *work);
80static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
81 struct ib_mad_agent_private *agent_priv,
82 u8 mgmt_class);
83static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
84 struct ib_mad_agent_private *agent_priv);
85
86/*
 87 * Returns an ib_mad_port_private structure or NULL for a device/port
88 * Assumes ib_mad_port_list_lock is being held
89 */
90static inline struct ib_mad_port_private *
91__ib_get_mad_port(struct ib_device *device, int port_num)
92{
93 struct ib_mad_port_private *entry;
94
95 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
96 if (entry->device == device && entry->port_num == port_num)
97 return entry;
98 }
99 return NULL;
100}
101
102/*
103 * Wrapper function to return a ib_mad_port_private structure or NULL
104 * for a device/port
105 */
106static inline struct ib_mad_port_private *
107ib_get_mad_port(struct ib_device *device, int port_num)
108{
109 struct ib_mad_port_private *entry;
110 unsigned long flags;
111
112 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
113 entry = __ib_get_mad_port(device, port_num);
114 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
115
116 return entry;
117}
118
119static inline u8 convert_mgmt_class(u8 mgmt_class)
120{
121 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
122 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
123 0 : mgmt_class;
124}
125
126static int get_spl_qp_index(enum ib_qp_type qp_type)
127{
128 switch (qp_type)
129 {
130 case IB_QPT_SMI:
131 return 0;
132 case IB_QPT_GSI:
133 return 1;
134 default:
135 return -1;
136 }
137}
138
139static int vendor_class_index(u8 mgmt_class)
140{
141 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
142}
143
144static int is_vendor_class(u8 mgmt_class)
145{
146 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
147 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
148 return 0;
149 return 1;
150}
151
152static int is_vendor_oui(char *oui)
153{
154 if (oui[0] || oui[1] || oui[2])
155 return 1;
156 return 0;
157}
158
159static int is_vendor_method_in_use(
160 struct ib_mad_mgmt_vendor_class *vendor_class,
161 struct ib_mad_reg_req *mad_reg_req)
162{
163 struct ib_mad_mgmt_method_table *method;
164 int i;
165
166 for (i = 0; i < MAX_MGMT_OUI; i++) {
167 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
168 method = vendor_class->method_table[i];
169 if (method) {
170 if (method_in_use(&method, mad_reg_req))
171 return 1;
172 else
173 break;
174 }
175 }
176 }
177 return 0;
178}
179
96909308 180int ib_response_mad(const struct ib_mad_hdr *hdr)
2527e681 181{
182 return ((hdr->method & IB_MGMT_METHOD_RESP) ||
183 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
184 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
185 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
186}
187EXPORT_SYMBOL(ib_response_mad);
188
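/*
 * Editor's note (illustrative, not part of the upstream source): a MAD is
 * treated as a response when the R bit of the method field is set - e.g.
 * GetResp (0x81) as opposed to Get (0x01) - when it is a TrapRepress, or,
 * for the BM class only, when the response bit of attr_mod is set.
 */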
189/*
190 * ib_register_mad_agent - Register to send/receive MADs
191 */
192struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
193 u8 port_num,
194 enum ib_qp_type qp_type,
195 struct ib_mad_reg_req *mad_reg_req,
196 u8 rmpp_version,
197 ib_mad_send_handler send_handler,
198 ib_mad_recv_handler recv_handler,
199 void *context,
200 u32 registration_flags)
201{
202 struct ib_mad_port_private *port_priv;
203 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
204 struct ib_mad_agent_private *mad_agent_priv;
205 struct ib_mad_reg_req *reg_req = NULL;
206 struct ib_mad_mgmt_class_table *class;
207 struct ib_mad_mgmt_vendor_class_table *vendor;
208 struct ib_mad_mgmt_vendor_class *vendor_class;
209 struct ib_mad_mgmt_method_table *method;
210 int ret2, qpn;
211 unsigned long flags;
212 u8 mgmt_class, vclass;
213
214 /* Validate parameters */
215 qpn = get_spl_qp_index(qp_type);
216 if (qpn == -1) {
217 dev_notice(&device->dev,
218 "ib_register_mad_agent: invalid QP Type %d\n",
219 qp_type);
1da177e4 220 goto error1;
9ad13a42 221 }
1da177e4 222
223 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
224 dev_notice(&device->dev,
225 "ib_register_mad_agent: invalid RMPP Version %u\n",
226 rmpp_version);
fa619a77 227 goto error1;
9ad13a42 228 }
229
230 /* Validate MAD registration request if supplied */
231 if (mad_reg_req) {
232 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
233 dev_notice(&device->dev,
234 "ib_register_mad_agent: invalid Class Version %u\n",
235 mad_reg_req->mgmt_class_version);
1da177e4 236 goto error1;
237 }
238 if (!recv_handler) {
239 dev_notice(&device->dev,
240 "ib_register_mad_agent: no recv_handler\n");
1da177e4 241 goto error1;
9ad13a42 242 }
243 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
244 /*
245 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
246 * one in this range currently allowed
247 */
248 if (mad_reg_req->mgmt_class !=
249 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
250 dev_notice(&device->dev,
251 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
252 mad_reg_req->mgmt_class);
1da177e4 253 goto error1;
9ad13a42 254 }
255 } else if (mad_reg_req->mgmt_class == 0) {
256 /*
257 * Class 0 is reserved in IBA and is used for
258 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
259 */
260 dev_notice(&device->dev,
261 "ib_register_mad_agent: Invalid Mgmt Class 0\n");
262 goto error1;
263 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
264 /*
265 * If class is in "new" vendor range,
266 * ensure supplied OUI is not zero
267 */
268 if (!is_vendor_oui(mad_reg_req->oui)) {
269 dev_notice(&device->dev,
270 "ib_register_mad_agent: No OUI specified for class 0x%x\n",
271 mad_reg_req->mgmt_class);
1da177e4 272 goto error1;
9ad13a42 273 }
1da177e4 274 }
618a3c03 275 /* Make sure class supplied is consistent with RMPP */
64cb9c6a 276 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
277 if (rmpp_version) {
278 dev_notice(&device->dev,
279 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
280 mad_reg_req->mgmt_class);
618a3c03 281 goto error1;
9ad13a42 282 }
618a3c03 283 }
1471cb6c 284
285 /* Make sure class supplied is consistent with QP type */
286 if (qp_type == IB_QPT_SMI) {
287 if ((mad_reg_req->mgmt_class !=
288 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
289 (mad_reg_req->mgmt_class !=
290 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
291 dev_notice(&device->dev,
292 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
293 mad_reg_req->mgmt_class);
1da177e4 294 goto error1;
9ad13a42 295 }
296 } else {
297 if ((mad_reg_req->mgmt_class ==
298 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
299 (mad_reg_req->mgmt_class ==
300 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
301 dev_notice(&device->dev,
302 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
303 mad_reg_req->mgmt_class);
1da177e4 304 goto error1;
9ad13a42 305 }
306 }
307 } else {
308 /* No registration request supplied */
309 if (!send_handler)
310 goto error1;
311 if (registration_flags & IB_MAD_USER_RMPP)
312 goto error1;
313 }
314
315 /* Validate device and port */
316 port_priv = ib_get_mad_port(device, port_num);
317 if (!port_priv) {
9ad13a42 318 dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
319 ret = ERR_PTR(-ENODEV);
320 goto error1;
321 }
322
323 /* Verify the QP requested is supported. For example, Ethernet devices
324 * will not have QP0 */
325 if (!port_priv->qp_info[qpn].qp) {
326 dev_notice(&device->dev,
327 "ib_register_mad_agent: QP %d not supported\n", qpn);
328 ret = ERR_PTR(-EPROTONOSUPPORT);
329 goto error1;
330 }
331
1da177e4 332 /* Allocate structures */
de6eb66b 333 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
334 if (!mad_agent_priv) {
335 ret = ERR_PTR(-ENOMEM);
336 goto error1;
337 }
338
339 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
340 IB_ACCESS_LOCAL_WRITE);
341 if (IS_ERR(mad_agent_priv->agent.mr)) {
342 ret = ERR_PTR(-ENOMEM);
343 goto error2;
344 }
345
346 if (mad_reg_req) {
9893e742 347 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
348 if (!reg_req) {
349 ret = ERR_PTR(-ENOMEM);
b82cab6b 350 goto error3;
1da177e4 351 }
352 }
353
354 /* Now, fill in the various structures */
355 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
356 mad_agent_priv->reg_req = reg_req;
fa619a77 357 mad_agent_priv->agent.rmpp_version = rmpp_version;
358 mad_agent_priv->agent.device = device;
359 mad_agent_priv->agent.recv_handler = recv_handler;
360 mad_agent_priv->agent.send_handler = send_handler;
361 mad_agent_priv->agent.context = context;
362 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
363 mad_agent_priv->agent.port_num = port_num;
0f29b46d 364 mad_agent_priv->agent.flags = registration_flags;
365 spin_lock_init(&mad_agent_priv->lock);
366 INIT_LIST_HEAD(&mad_agent_priv->send_list);
367 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
368 INIT_LIST_HEAD(&mad_agent_priv->done_list);
369 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
370 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
371 INIT_LIST_HEAD(&mad_agent_priv->local_list);
372 INIT_WORK(&mad_agent_priv->local_work, local_completions);
373 atomic_set(&mad_agent_priv->refcount, 1);
374 init_completion(&mad_agent_priv->comp);
375
376 spin_lock_irqsave(&port_priv->reg_lock, flags);
377 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
378
379 /*
380 * Make sure MAD registration (if supplied)
 381 * does not overlap with any existing ones
382 */
383 if (mad_reg_req) {
384 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
385 if (!is_vendor_class(mgmt_class)) {
386 class = port_priv->version[mad_reg_req->
387 mgmt_class_version].class;
388 if (class) {
389 method = class->method_table[mgmt_class];
390 if (method) {
391 if (method_in_use(&method,
392 mad_reg_req))
b82cab6b 393 goto error4;
394 }
395 }
396 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
397 mgmt_class);
398 } else {
399 /* "New" vendor class range */
400 vendor = port_priv->version[mad_reg_req->
401 mgmt_class_version].vendor;
402 if (vendor) {
403 vclass = vendor_class_index(mgmt_class);
404 vendor_class = vendor->vendor_class[vclass];
405 if (vendor_class) {
406 if (is_vendor_method_in_use(
407 vendor_class,
408 mad_reg_req))
b82cab6b 409 goto error4;
410 }
411 }
412 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
413 }
414 if (ret2) {
415 ret = ERR_PTR(ret2);
b82cab6b 416 goto error4;
417 }
418 }
419
420 /* Add mad agent into port's agent list */
421 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
422 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
423
424 return &mad_agent_priv->agent;
425
b82cab6b 426error4:
427 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
428 kfree(reg_req);
b82cab6b 429error3:
b82cab6b 430 ib_dereg_mr(mad_agent_priv->agent.mr);
431error2:
432 kfree(mad_agent_priv);
433error1:
434 return ret;
435}
436EXPORT_SYMBOL(ib_register_mad_agent);
437
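/*
 * Minimal registration sketch (editor's illustration, not part of this file;
 * my_send_handler, my_recv_handler and my_context are hypothetical names and
 * error handling beyond the final check is omitted). A client that wants to
 * receive Performance Management Gets and Sets on the GSI QP would do
 * roughly:
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class         = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	set_bit(IB_MGMT_METHOD_SET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      0, my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *
 * Here rmpp_version is 0 (no RMPP) and registration_flags is 0.
 */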
438static inline int is_snooping_sends(int mad_snoop_flags)
439{
440 return (mad_snoop_flags &
441 (/*IB_MAD_SNOOP_POSTED_SENDS |
442 IB_MAD_SNOOP_RMPP_SENDS |*/
443 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
444 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
445}
446
447static inline int is_snooping_recvs(int mad_snoop_flags)
448{
449 return (mad_snoop_flags &
450 (IB_MAD_SNOOP_RECVS /*|
451 IB_MAD_SNOOP_RMPP_RECVS*/));
452}
453
454static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
455 struct ib_mad_snoop_private *mad_snoop_priv)
456{
457 struct ib_mad_snoop_private **new_snoop_table;
458 unsigned long flags;
459 int i;
460
461 spin_lock_irqsave(&qp_info->snoop_lock, flags);
462 /* Check for empty slot in array. */
463 for (i = 0; i < qp_info->snoop_table_size; i++)
464 if (!qp_info->snoop_table[i])
465 break;
466
467 if (i == qp_info->snoop_table_size) {
468 /* Grow table. */
469 new_snoop_table = krealloc(qp_info->snoop_table,
470 sizeof mad_snoop_priv *
471 (qp_info->snoop_table_size + 1),
472 GFP_ATOMIC);
473 if (!new_snoop_table) {
474 i = -ENOMEM;
475 goto out;
476 }
52805174 477
478 qp_info->snoop_table = new_snoop_table;
479 qp_info->snoop_table_size++;
480 }
481 qp_info->snoop_table[i] = mad_snoop_priv;
482 atomic_inc(&qp_info->snoop_count);
483out:
484 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
485 return i;
486}
487
488struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
489 u8 port_num,
490 enum ib_qp_type qp_type,
491 int mad_snoop_flags,
492 ib_mad_snoop_handler snoop_handler,
493 ib_mad_recv_handler recv_handler,
494 void *context)
495{
496 struct ib_mad_port_private *port_priv;
497 struct ib_mad_agent *ret;
498 struct ib_mad_snoop_private *mad_snoop_priv;
499 int qpn;
500
501 /* Validate parameters */
502 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
503 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
504 ret = ERR_PTR(-EINVAL);
505 goto error1;
506 }
507 qpn = get_spl_qp_index(qp_type);
508 if (qpn == -1) {
509 ret = ERR_PTR(-EINVAL);
510 goto error1;
511 }
512 port_priv = ib_get_mad_port(device, port_num);
513 if (!port_priv) {
514 ret = ERR_PTR(-ENODEV);
515 goto error1;
516 }
517 /* Allocate structures */
de6eb66b 518 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
519 if (!mad_snoop_priv) {
520 ret = ERR_PTR(-ENOMEM);
521 goto error1;
522 }
523
524 /* Now, fill in the various structures */
525 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
526 mad_snoop_priv->agent.device = device;
527 mad_snoop_priv->agent.recv_handler = recv_handler;
528 mad_snoop_priv->agent.snoop_handler = snoop_handler;
529 mad_snoop_priv->agent.context = context;
530 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
531 mad_snoop_priv->agent.port_num = port_num;
532 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
1b52fa98 533 init_completion(&mad_snoop_priv->comp);
534 mad_snoop_priv->snoop_index = register_snoop_agent(
535 &port_priv->qp_info[qpn],
536 mad_snoop_priv);
537 if (mad_snoop_priv->snoop_index < 0) {
538 ret = ERR_PTR(mad_snoop_priv->snoop_index);
539 goto error2;
540 }
541
542 atomic_set(&mad_snoop_priv->refcount, 1);
543 return &mad_snoop_priv->agent;
544
545error2:
546 kfree(mad_snoop_priv);
547error1:
548 return ret;
549}
550EXPORT_SYMBOL(ib_register_mad_snoop);
551
552static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
553{
554 if (atomic_dec_and_test(&mad_agent_priv->refcount))
555 complete(&mad_agent_priv->comp);
556}
557
558static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
559{
560 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
561 complete(&mad_snoop_priv->comp);
562}
563
564static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
565{
566 struct ib_mad_port_private *port_priv;
567 unsigned long flags;
568
569 /* Note that we could still be handling received MADs */
570
571 /*
572 * Canceling all sends results in dropping received response
573 * MADs, preventing us from queuing additional work
574 */
575 cancel_mads(mad_agent_priv);
1da177e4 576 port_priv = mad_agent_priv->qp_info->port_priv;
1da177e4 577 cancel_delayed_work(&mad_agent_priv->timed_work);
578
579 spin_lock_irqsave(&port_priv->reg_lock, flags);
580 remove_mad_reg_req(mad_agent_priv);
581 list_del(&mad_agent_priv->agent_list);
582 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
583
b82cab6b 584 flush_workqueue(port_priv->wq);
fa619a77 585 ib_cancel_rmpp_recvs(mad_agent_priv);
1da177e4 586
587 deref_mad_agent(mad_agent_priv);
588 wait_for_completion(&mad_agent_priv->comp);
1da177e4 589
6044ec88 590 kfree(mad_agent_priv->reg_req);
b82cab6b 591 ib_dereg_mr(mad_agent_priv->agent.mr);
592 kfree(mad_agent_priv);
593}
594
595static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
596{
597 struct ib_mad_qp_info *qp_info;
598 unsigned long flags;
599
600 qp_info = mad_snoop_priv->qp_info;
601 spin_lock_irqsave(&qp_info->snoop_lock, flags);
602 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
603 atomic_dec(&qp_info->snoop_count);
604 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
605
606 deref_snoop_agent(mad_snoop_priv);
607 wait_for_completion(&mad_snoop_priv->comp);
608
609 kfree(mad_snoop_priv);
610}
611
612/*
613 * ib_unregister_mad_agent - Unregisters a client from using MAD services
614 */
615int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
616{
617 struct ib_mad_agent_private *mad_agent_priv;
618 struct ib_mad_snoop_private *mad_snoop_priv;
619
620 /* If the TID is zero, the agent can only snoop. */
621 if (mad_agent->hi_tid) {
622 mad_agent_priv = container_of(mad_agent,
623 struct ib_mad_agent_private,
624 agent);
625 unregister_mad_agent(mad_agent_priv);
626 } else {
627 mad_snoop_priv = container_of(mad_agent,
628 struct ib_mad_snoop_private,
629 agent);
630 unregister_mad_snoop(mad_snoop_priv);
631 }
632 return 0;
633}
634EXPORT_SYMBOL(ib_unregister_mad_agent);
635
636static void dequeue_mad(struct ib_mad_list_head *mad_list)
637{
638 struct ib_mad_queue *mad_queue;
639 unsigned long flags;
640
641 BUG_ON(!mad_list->mad_queue);
642 mad_queue = mad_list->mad_queue;
643 spin_lock_irqsave(&mad_queue->lock, flags);
644 list_del(&mad_list->list);
645 mad_queue->count--;
646 spin_unlock_irqrestore(&mad_queue->lock, flags);
647}
648
649static void snoop_send(struct ib_mad_qp_info *qp_info,
34816ad9 650 struct ib_mad_send_buf *send_buf,
651 struct ib_mad_send_wc *mad_send_wc,
652 int mad_snoop_flags)
653{
654 struct ib_mad_snoop_private *mad_snoop_priv;
655 unsigned long flags;
656 int i;
657
658 spin_lock_irqsave(&qp_info->snoop_lock, flags);
659 for (i = 0; i < qp_info->snoop_table_size; i++) {
660 mad_snoop_priv = qp_info->snoop_table[i];
661 if (!mad_snoop_priv ||
662 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
663 continue;
664
665 atomic_inc(&mad_snoop_priv->refcount);
666 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
667 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
34816ad9 668 send_buf, mad_send_wc);
1b52fa98 669 deref_snoop_agent(mad_snoop_priv);
670 spin_lock_irqsave(&qp_info->snoop_lock, flags);
671 }
672 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
673}
674
675static void snoop_recv(struct ib_mad_qp_info *qp_info,
676 struct ib_mad_recv_wc *mad_recv_wc,
677 int mad_snoop_flags)
678{
679 struct ib_mad_snoop_private *mad_snoop_priv;
680 unsigned long flags;
681 int i;
682
683 spin_lock_irqsave(&qp_info->snoop_lock, flags);
684 for (i = 0; i < qp_info->snoop_table_size; i++) {
685 mad_snoop_priv = qp_info->snoop_table[i];
686 if (!mad_snoop_priv ||
687 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
688 continue;
689
690 atomic_inc(&mad_snoop_priv->refcount);
691 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
692 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
693 mad_recv_wc);
1b52fa98 694 deref_snoop_agent(mad_snoop_priv);
695 spin_lock_irqsave(&qp_info->snoop_lock, flags);
696 }
697 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
698}
699
700static void build_smp_wc(struct ib_qp *qp,
701 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
702 struct ib_wc *wc)
703{
704 memset(wc, 0, sizeof *wc);
705 wc->wr_id = wr_id;
706 wc->status = IB_WC_SUCCESS;
707 wc->opcode = IB_WC_RECV;
708 wc->pkey_index = pkey_index;
709 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
710 wc->src_qp = IB_QP0;
062dbb69 711 wc->qp = qp;
712 wc->slid = slid;
713 wc->sl = 0;
714 wc->dlid_path_bits = 0;
715 wc->port_num = port_num;
716}
717
718static size_t mad_priv_size(const struct ib_mad_private *mp)
719{
720 return sizeof(struct ib_mad_private) + mp->mad_size;
721}
722
723static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
724{
725 size_t size = sizeof(struct ib_mad_private) + mad_size;
726 struct ib_mad_private *ret = kzalloc(size, flags);
727
728 if (ret)
729 ret->mad_size = mad_size;
730
731 return ret;
732}
733
734static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
735{
736 return rdma_max_mad_size(port_priv->device, port_priv->port_num);
737}
738
739static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
740{
741 return sizeof(struct ib_grh) + mp->mad_size;
742}
743
744/*
745 * Return 0 if SMP is to be sent
746 * Return 1 if SMP was consumed locally (whether or not solicited)
747 * Return < 0 if error
748 */
749static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
34816ad9 750 struct ib_mad_send_wr_private *mad_send_wr)
1da177e4 751{
de493d47 752 int ret = 0;
34816ad9 753 struct ib_smp *smp = mad_send_wr->send_buf.mad;
754 unsigned long flags;
755 struct ib_mad_local_private *local;
756 struct ib_mad_private *mad_priv;
757 struct ib_mad_port_private *port_priv;
758 struct ib_mad_agent_private *recv_mad_agent = NULL;
759 struct ib_device *device = mad_agent_priv->agent.device;
1bae4dbf 760 u8 port_num;
1da177e4 761 struct ib_wc mad_wc;
34816ad9 762 struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
c9082e51 763 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
4cd7c947 764 u16 out_mad_pkey_index = 0;
1da177e4 765
766 if (device->node_type == RDMA_NODE_IB_SWITCH &&
767 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
768 port_num = send_wr->wr.ud.port_num;
769 else
770 port_num = mad_agent_priv->agent.port_num;
771
772 /*
773 * Directed route handling starts if the initial LID routed part of
774 * a request or the ending LID routed part of a response is empty.
775 * If we are at the start of the LID routed part, don't update the
776 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
777 */
778 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
779 IB_LID_PERMISSIVE &&
780 smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
781 IB_SMI_DISCARD) {
1da177e4 782 ret = -EINVAL;
7ef5d4b0 783 dev_err(&device->dev, "Invalid directed route\n");
784 goto out;
785 }
de493d47 786
1da177e4 787 /* Check to post send on QP or process locally */
788 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
789 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
790 goto out;
791
792 local = kmalloc(sizeof *local, GFP_ATOMIC);
793 if (!local) {
794 ret = -ENOMEM;
7ef5d4b0 795 dev_err(&device->dev, "No memory for ib_mad_local_private\n");
796 goto out;
797 }
798 local->mad_priv = NULL;
799 local->recv_mad_agent = NULL;
c9082e51 800 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
801 if (!mad_priv) {
802 ret = -ENOMEM;
7ef5d4b0 803 dev_err(&device->dev, "No memory for local response MAD\n");
804 kfree(local);
805 goto out;
806 }
807
808 build_smp_wc(mad_agent_priv->agent.qp,
809 send_wr->wr_id, be16_to_cpu(smp->dr_slid),
97f52eb4 810 send_wr->wr.ud.pkey_index,
811 send_wr->wr.ud.port_num, &mad_wc);
812
813 /* No GRH for DR SMP */
814 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
815 (const struct ib_mad_hdr *)smp, mad_size,
816 (struct ib_mad_hdr *)mad_priv->mad,
817 &mad_size, &out_mad_pkey_index);
818 switch (ret)
819 {
820 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
c9082e51 821 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
822 mad_agent_priv->agent.recv_handler) {
823 local->mad_priv = mad_priv;
824 local->recv_mad_agent = mad_agent_priv;
825 /*
826 * Reference MAD agent until receive
827 * side of local completion handled
828 */
829 atomic_inc(&mad_agent_priv->refcount);
830 } else
c9082e51 831 kfree(mad_priv);
832 break;
833 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
c9082e51 834 kfree(mad_priv);
4780c195 835 break;
836 case IB_MAD_RESULT_SUCCESS:
837 /* Treat like an incoming receive MAD */
838 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
839 mad_agent_priv->agent.port_num);
840 if (port_priv) {
c9082e51 841 memcpy(mad_priv->mad, smp, mad_priv->mad_size);
1da177e4 842 recv_mad_agent = find_mad_agent(port_priv,
c9082e51 843 (const struct ib_mad_hdr *)mad_priv->mad);
844 }
845 if (!port_priv || !recv_mad_agent) {
846 /*
847 * No receiving agent so drop packet and
848 * generate send completion.
849 */
c9082e51 850 kfree(mad_priv);
4780c195 851 break;
852 }
853 local->mad_priv = mad_priv;
854 local->recv_mad_agent = recv_mad_agent;
855 break;
856 default:
c9082e51 857 kfree(mad_priv);
858 kfree(local);
859 ret = -EINVAL;
860 goto out;
861 }
862
34816ad9 863 local->mad_send_wr = mad_send_wr;
864 /* Reference MAD agent until send side of local completion handled */
865 atomic_inc(&mad_agent_priv->refcount);
866 /* Queue local completion to local list */
867 spin_lock_irqsave(&mad_agent_priv->lock, flags);
868 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
869 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
870 queue_work(mad_agent_priv->qp_info->port_priv->wq,
b82cab6b 871 &mad_agent_priv->local_work);
872 ret = 1;
873out:
874 return ret;
875}
876
f36e1793 877static int get_pad_size(int hdr_len, int data_len)
878{
879 int seg_size, pad;
880
881 seg_size = sizeof(struct ib_mad) - hdr_len;
882 if (data_len && seg_size) {
883 pad = seg_size - data_len % seg_size;
f36e1793 884 return pad == seg_size ? 0 : pad;
824c8ae7 885 } else
886 return seg_size;
887}
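/*
 * Worked example (editor's note, not part of the upstream source): for an SA
 * MAD, hdr_len is IB_MGMT_SA_HDR (56 bytes), so each segment carries
 * sizeof(struct ib_mad) - 56 = 200 bytes of data. With data_len = 500,
 * 500 % 200 = 100 and get_pad_size() returns 200 - 100 = 100, i.e. the
 * payload goes out as three full 200-byte segments (500 bytes of data plus
 * 100 bytes of zeroed padding).
 */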
888
889static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
890{
891 struct ib_rmpp_segment *s, *t;
892
893 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
894 list_del(&s->list);
895 kfree(s);
896 }
897}
898
899static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
900 gfp_t gfp_mask)
901{
902 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
903 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
904 struct ib_rmpp_segment *seg = NULL;
905 int left, seg_size, pad;
906
c9082e51 907 send_buf->seg_size = sizeof(struct ib_mad) - send_buf->hdr_len;
908 seg_size = send_buf->seg_size;
909 pad = send_wr->pad;
910
911 /* Allocate data segments. */
912 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
913 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
914 if (!seg) {
915 dev_err(&send_buf->mad_agent->device->dev,
916 "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
917 sizeof (*seg) + seg_size, gfp_mask);
918 free_send_rmpp_list(send_wr);
919 return -ENOMEM;
920 }
921 seg->num = ++send_buf->seg_count;
922 list_add_tail(&seg->list, &send_wr->rmpp_list);
923 }
924
925 /* Zero any padding */
926 if (pad)
927 memset(seg->data + seg_size - pad, 0, pad);
928
929 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
930 agent.rmpp_version;
931 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
932 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
933
934 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
935 struct ib_rmpp_segment, list);
936 send_wr->last_ack_seg = send_wr->cur_seg;
937 return 0;
938}
939
f766c58f 940int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
941{
942 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
943}
944EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
945
946struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
947 u32 remote_qpn, u16 pkey_index,
34816ad9 948 int rmpp_active,
824c8ae7 949 int hdr_len, int data_len,
950 gfp_t gfp_mask,
951 u8 base_version)
952{
953 struct ib_mad_agent_private *mad_agent_priv;
34816ad9 954 struct ib_mad_send_wr_private *mad_send_wr;
f36e1793 955 int pad, message_size, ret, size;
956 void *buf;
957
958 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
959 agent);
960 pad = get_pad_size(hdr_len, data_len);
961 message_size = hdr_len + data_len + pad;
824c8ae7 962
963 if (ib_mad_kernel_rmpp_agent(mad_agent)) {
964 if (!rmpp_active && message_size > sizeof(struct ib_mad))
965 return ERR_PTR(-EINVAL);
966 } else
967 if (rmpp_active || message_size > sizeof(struct ib_mad))
968 return ERR_PTR(-EINVAL);
fa619a77 969
970 size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
971 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
972 if (!buf)
973 return ERR_PTR(-ENOMEM);
34816ad9 974
975 mad_send_wr = buf + size;
976 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
34816ad9 977 mad_send_wr->send_buf.mad = buf;
978 mad_send_wr->send_buf.hdr_len = hdr_len;
979 mad_send_wr->send_buf.data_len = data_len;
980 mad_send_wr->pad = pad;
981
982 mad_send_wr->mad_agent_priv = mad_agent_priv;
f36e1793 983 mad_send_wr->sg_list[0].length = hdr_len;
34816ad9 984 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
985 mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
986 mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
987
988 mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
989 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
f36e1793 990 mad_send_wr->send_wr.num_sge = 2;
991 mad_send_wr->send_wr.opcode = IB_WR_SEND;
992 mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
993 mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
994 mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
995 mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
996
997 if (rmpp_active) {
998 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
999 if (ret) {
1000 kfree(buf);
1001 return ERR_PTR(ret);
1002 }
1003 }
1004
34816ad9 1005 mad_send_wr->send_buf.mad_agent = mad_agent;
824c8ae7 1006 atomic_inc(&mad_agent_priv->refcount);
34816ad9 1007 return &mad_send_wr->send_buf;
1008}
1009EXPORT_SYMBOL(ib_create_send_mad);
1010
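/*
 * Minimal send sketch (editor's illustration, not part of this file; agent,
 * ah, remote_qpn and pkey_index are assumed to exist). A single non-RMPP MAD
 * is built and posted roughly as follows, after the MAD header and payload
 * in msg->mad have been filled in; rmpp_active is 0 and the buffer holds a
 * standard 24-byte header plus 232 bytes of data:
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *
 *	msg->ah = ah;
 *	msg->timeout_ms = 1000;
 *	msg->retries = 3;
 *	if (ib_post_send_mad(msg, NULL))
 *		ib_free_send_mad(msg);
 *
 * On success the agent's send_handler eventually frees the buffer with
 * ib_free_send_mad() once the send completes or times out.
 */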
1011int ib_get_mad_data_offset(u8 mgmt_class)
1012{
1013 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1014 return IB_MGMT_SA_HDR;
1015 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1016 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1017 (mgmt_class == IB_MGMT_CLASS_BIS))
1018 return IB_MGMT_DEVICE_HDR;
1019 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1020 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1021 return IB_MGMT_VENDOR_HDR;
1022 else
1023 return IB_MGMT_MAD_HDR;
1024}
1025EXPORT_SYMBOL(ib_get_mad_data_offset);
1026
1027int ib_is_mad_class_rmpp(u8 mgmt_class)
1028{
1029 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1030 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1031 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1032 (mgmt_class == IB_MGMT_CLASS_BIS) ||
1033 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1034 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1035 return 1;
1036 return 0;
1037}
1038EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1039
1040void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1041{
1042 struct ib_mad_send_wr_private *mad_send_wr;
1043 struct list_head *list;
1044
1045 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1046 send_buf);
1047 list = &mad_send_wr->cur_seg->list;
1048
1049 if (mad_send_wr->cur_seg->num < seg_num) {
1050 list_for_each_entry(mad_send_wr->cur_seg, list, list)
1051 if (mad_send_wr->cur_seg->num == seg_num)
1052 break;
1053 } else if (mad_send_wr->cur_seg->num > seg_num) {
1054 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1055 if (mad_send_wr->cur_seg->num == seg_num)
1056 break;
1057 }
1058 return mad_send_wr->cur_seg->data;
1059}
1060EXPORT_SYMBOL(ib_get_rmpp_segment);
1061
1062static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1063{
1064 if (mad_send_wr->send_buf.seg_count)
1065 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1066 mad_send_wr->seg_num);
1067 else
1068 return mad_send_wr->send_buf.mad +
1069 mad_send_wr->send_buf.hdr_len;
1070}
1071
1072void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1073{
1074 struct ib_mad_agent_private *mad_agent_priv;
f36e1793 1075 struct ib_mad_send_wr_private *mad_send_wr;
1076
1077 mad_agent_priv = container_of(send_buf->mad_agent,
1078 struct ib_mad_agent_private, agent);
1079 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1080 send_buf);
824c8ae7 1081
1082 free_send_rmpp_list(mad_send_wr);
1083 kfree(send_buf->mad);
1b52fa98 1084 deref_mad_agent(mad_agent_priv);
1085}
1086EXPORT_SYMBOL(ib_free_send_mad);
1087
fa619a77 1088int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1089{
1090 struct ib_mad_qp_info *qp_info;
cabe3cbc 1091 struct list_head *list;
1092 struct ib_send_wr *bad_send_wr;
1093 struct ib_mad_agent *mad_agent;
1094 struct ib_sge *sge;
1095 unsigned long flags;
1096 int ret;
1097
f8197a4e 1098 /* Set WR ID to find mad_send_wr upon completion */
d760ce8f 1099 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1100 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1101 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1102
1103 mad_agent = mad_send_wr->send_buf.mad_agent;
1104 sge = mad_send_wr->sg_list;
1105 sge[0].addr = ib_dma_map_single(mad_agent->device,
1106 mad_send_wr->send_buf.mad,
1107 sge[0].length,
1108 DMA_TO_DEVICE);
1109 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1110 return -ENOMEM;
1111
1112 mad_send_wr->header_mapping = sge[0].addr;
1113
1114 sge[1].addr = ib_dma_map_single(mad_agent->device,
1115 ib_get_payload(mad_send_wr),
1116 sge[1].length,
1117 DMA_TO_DEVICE);
1118 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1119 ib_dma_unmap_single(mad_agent->device,
1120 mad_send_wr->header_mapping,
1121 sge[0].length, DMA_TO_DEVICE);
1122 return -ENOMEM;
1123 }
1527106f 1124 mad_send_wr->payload_mapping = sge[1].addr;
34816ad9 1125
1da177e4 1126 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
cabe3cbc 1127 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1128 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1129 &bad_send_wr);
cabe3cbc 1130 list = &qp_info->send_queue.list;
1da177e4 1131 } else {
1da177e4 1132 ret = 0;
cabe3cbc 1133 list = &qp_info->overflow_list;
1da177e4 1134 }
1135
1136 if (!ret) {
1137 qp_info->send_queue.count++;
1138 list_add_tail(&mad_send_wr->mad_list.list, list);
1139 }
1140 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
f36e1793 1141 if (ret) {
1142 ib_dma_unmap_single(mad_agent->device,
1143 mad_send_wr->header_mapping,
1144 sge[0].length, DMA_TO_DEVICE);
1145 ib_dma_unmap_single(mad_agent->device,
1146 mad_send_wr->payload_mapping,
1147 sge[1].length, DMA_TO_DEVICE);
f36e1793 1148 }
1149 return ret;
1150}
1151
1152/*
1153 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1154 * with the registered client
1155 */
1156int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1157 struct ib_mad_send_buf **bad_send_buf)
1da177e4 1158{
1da177e4 1159 struct ib_mad_agent_private *mad_agent_priv;
1160 struct ib_mad_send_buf *next_send_buf;
1161 struct ib_mad_send_wr_private *mad_send_wr;
1162 unsigned long flags;
1163 int ret = -EINVAL;
1164
1165 /* Walk list of send WRs and post each on send list */
34816ad9 1166 for (; send_buf; send_buf = next_send_buf) {
1da177e4 1167
1168 mad_send_wr = container_of(send_buf,
1169 struct ib_mad_send_wr_private,
1170 send_buf);
1171 mad_agent_priv = mad_send_wr->mad_agent_priv;
1da177e4 1172
1173 if (!send_buf->mad_agent->send_handler ||
1174 (send_buf->timeout_ms &&
1175 !send_buf->mad_agent->recv_handler)) {
1176 ret = -EINVAL;
1177 goto error;
1178 }
1179
1180 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1181 if (mad_agent_priv->agent.rmpp_version) {
1182 ret = -EINVAL;
1183 goto error;
1184 }
1185 }
1186
1187 /*
1188 * Save pointer to next work request to post in case the
1189 * current one completes, and the user modifies the work
1190 * request associated with the completion
1191 */
1192 next_send_buf = send_buf->next;
1193 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1da177e4 1194
1195 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1196 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1197 ret = handle_outgoing_dr_smp(mad_agent_priv,
1198 mad_send_wr);
1da177e4 1199 if (ret < 0) /* error */
34816ad9 1200 goto error;
1da177e4 1201 else if (ret == 1) /* locally consumed */
34816ad9 1202 continue;
1203 }
1204
34816ad9 1205 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1da177e4 1206 /* Timeout will be updated after send completes */
34816ad9 1207 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1208 mad_send_wr->max_retries = send_buf->retries;
1209 mad_send_wr->retries_left = send_buf->retries;
1210 send_buf->retries = 0;
34816ad9 1211 /* Reference for work request to QP + response */
1212 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1213 mad_send_wr->status = IB_WC_SUCCESS;
1214
1215 /* Reference MAD agent until send completes */
1216 atomic_inc(&mad_agent_priv->refcount);
1217 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1218 list_add_tail(&mad_send_wr->agent_list,
1219 &mad_agent_priv->send_list);
1220 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1221
1471cb6c 1222 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1223 ret = ib_send_rmpp_mad(mad_send_wr);
1224 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1225 ret = ib_send_mad(mad_send_wr);
1226 } else
1227 ret = ib_send_mad(mad_send_wr);
1228 if (ret < 0) {
1229 /* Fail send request */
1230 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1231 list_del(&mad_send_wr->agent_list);
1232 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1233 atomic_dec(&mad_agent_priv->refcount);
34816ad9 1234 goto error;
1da177e4 1235 }
1236 }
1237 return 0;
1238error:
1239 if (bad_send_buf)
1240 *bad_send_buf = send_buf;
1241 return ret;
1242}
1243EXPORT_SYMBOL(ib_post_send_mad);
1244
1245/*
1246 * ib_free_recv_mad - Returns data buffers used to receive
1247 * a MAD to the access layer
1248 */
1249void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1250{
fa619a77 1251 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1252 struct ib_mad_private_header *mad_priv_hdr;
1253 struct ib_mad_private *priv;
fa619a77 1254 struct list_head free_list;
1da177e4 1255
1256 INIT_LIST_HEAD(&free_list);
1257 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1da177e4 1258
1259 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1260 &free_list, list) {
1261 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1262 recv_buf);
1263 mad_priv_hdr = container_of(mad_recv_wc,
1264 struct ib_mad_private_header,
1265 recv_wc);
1266 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1267 header);
c9082e51 1268 kfree(priv);
1da177e4 1269 }
1270}
1271EXPORT_SYMBOL(ib_free_recv_mad);
1272
1273struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1274 u8 rmpp_version,
1275 ib_mad_send_handler send_handler,
1276 ib_mad_recv_handler recv_handler,
1277 void *context)
1278{
1279 return ERR_PTR(-EINVAL); /* XXX: for now */
1280}
1281EXPORT_SYMBOL(ib_redirect_mad_qp);
1282
1283int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1284 struct ib_wc *wc)
1285{
1286 dev_err(&mad_agent->device->dev,
1287 "ib_process_mad_wc() not implemented yet\n");
1288 return 0;
1289}
1290EXPORT_SYMBOL(ib_process_mad_wc);
1291
1292static int method_in_use(struct ib_mad_mgmt_method_table **method,
1293 struct ib_mad_reg_req *mad_reg_req)
1294{
1295 int i;
1296
19b629f5 1297 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1da177e4 1298 if ((*method)->agent[i]) {
7ef5d4b0 1299 pr_err("Method %d already in use\n", i);
1300 return -EINVAL;
1301 }
1302 }
1303 return 0;
1304}
1305
1306static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1307{
1308 /* Allocate management method table */
de6eb66b 1309 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1da177e4 1310 if (!*method) {
7ef5d4b0 1311 pr_err("No memory for ib_mad_mgmt_method_table\n");
1312 return -ENOMEM;
1313 }
1314
1315 return 0;
1316}
1317
1318/*
1319 * Check to see if there are any methods still in use
1320 */
1321static int check_method_table(struct ib_mad_mgmt_method_table *method)
1322{
1323 int i;
1324
1325 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1326 if (method->agent[i])
1327 return 1;
1328 return 0;
1329}
1330
1331/*
1332 * Check to see if there are any method tables for this class still in use
1333 */
1334static int check_class_table(struct ib_mad_mgmt_class_table *class)
1335{
1336 int i;
1337
1338 for (i = 0; i < MAX_MGMT_CLASS; i++)
1339 if (class->method_table[i])
1340 return 1;
1341 return 0;
1342}
1343
1344static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1345{
1346 int i;
1347
1348 for (i = 0; i < MAX_MGMT_OUI; i++)
1349 if (vendor_class->method_table[i])
1350 return 1;
1351 return 0;
1352}
1353
1354static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
d94bd266 1355 const char *oui)
1356{
1357 int i;
1358
1359 for (i = 0; i < MAX_MGMT_OUI; i++)
1360 /* Is there matching OUI for this vendor class ? */
1361 if (!memcmp(vendor_class->oui[i], oui, 3))
1362 return i;
1363
1364 return -1;
1365}
1366
1367static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1368{
1369 int i;
1370
1371 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1372 if (vendor->vendor_class[i])
1373 return 1;
1374
1375 return 0;
1376}
1377
1378static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1379 struct ib_mad_agent_private *agent)
1380{
1381 int i;
1382
1383 /* Remove any methods for this mad agent */
1384 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1385 if (method->agent[i] == agent) {
1386 method->agent[i] = NULL;
1387 }
1388 }
1389}
1390
1391static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1392 struct ib_mad_agent_private *agent_priv,
1393 u8 mgmt_class)
1394{
1395 struct ib_mad_port_private *port_priv;
1396 struct ib_mad_mgmt_class_table **class;
1397 struct ib_mad_mgmt_method_table **method;
1398 int i, ret;
1399
1400 port_priv = agent_priv->qp_info->port_priv;
1401 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1402 if (!*class) {
1403 /* Allocate management class table for "new" class version */
de6eb66b 1404 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1da177e4 1405 if (!*class) {
1406 dev_err(&agent_priv->agent.device->dev,
1407 "No memory for ib_mad_mgmt_class_table\n");
1408 ret = -ENOMEM;
1409 goto error1;
1410 }
de6eb66b 1411
1412 /* Allocate method table for this management class */
1413 method = &(*class)->method_table[mgmt_class];
1414 if ((ret = allocate_method_table(method)))
1415 goto error2;
1416 } else {
1417 method = &(*class)->method_table[mgmt_class];
1418 if (!*method) {
1419 /* Allocate method table for this management class */
1420 if ((ret = allocate_method_table(method)))
1421 goto error1;
1422 }
1423 }
1424
1425 /* Now, make sure methods are not already in use */
1426 if (method_in_use(method, mad_reg_req))
1427 goto error3;
1428
1429 /* Finally, add in methods being registered */
19b629f5 1430 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1da177e4 1431 (*method)->agent[i] = agent_priv;
19b629f5 1432
1433 return 0;
1434
1435error3:
1436 /* Remove any methods for this mad agent */
1437 remove_methods_mad_agent(*method, agent_priv);
1438 /* Now, check to see if there are any methods in use */
1439 if (!check_method_table(*method)) {
1440 /* If not, release management method table */
1441 kfree(*method);
1442 *method = NULL;
1443 }
1444 ret = -EINVAL;
1445 goto error1;
1446error2:
1447 kfree(*class);
1448 *class = NULL;
1449error1:
1450 return ret;
1451}
1452
1453static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1454 struct ib_mad_agent_private *agent_priv)
1455{
1456 struct ib_mad_port_private *port_priv;
1457 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1458 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1459 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1460 struct ib_mad_mgmt_method_table **method;
1461 int i, ret = -ENOMEM;
1462 u8 vclass;
1463
1464 /* "New" vendor (with OUI) class */
1465 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1466 port_priv = agent_priv->qp_info->port_priv;
1467 vendor_table = &port_priv->version[
1468 mad_reg_req->mgmt_class_version].vendor;
1469 if (!*vendor_table) {
1470 /* Allocate mgmt vendor class table for "new" class version */
de6eb66b 1471 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1da177e4 1472 if (!vendor) {
1473 dev_err(&agent_priv->agent.device->dev,
1474 "No memory for ib_mad_mgmt_vendor_class_table\n");
1475 goto error1;
1476 }
de6eb66b 1477
1478 *vendor_table = vendor;
1479 }
1480 if (!(*vendor_table)->vendor_class[vclass]) {
1481 /* Allocate table for this management vendor class */
de6eb66b 1482 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1da177e4 1483 if (!vendor_class) {
1484 dev_err(&agent_priv->agent.device->dev,
1485 "No memory for ib_mad_mgmt_vendor_class\n");
1486 goto error2;
1487 }
de6eb66b 1488
1489 (*vendor_table)->vendor_class[vclass] = vendor_class;
1490 }
1491 for (i = 0; i < MAX_MGMT_OUI; i++) {
1492 /* Is there matching OUI for this vendor class ? */
1493 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1494 mad_reg_req->oui, 3)) {
1495 method = &(*vendor_table)->vendor_class[
1496 vclass]->method_table[i];
1497 BUG_ON(!*method);
1498 goto check_in_use;
1499 }
1500 }
1501 for (i = 0; i < MAX_MGMT_OUI; i++) {
1502 /* OUI slot available ? */
1503 if (!is_vendor_oui((*vendor_table)->vendor_class[
1504 vclass]->oui[i])) {
1505 method = &(*vendor_table)->vendor_class[
1506 vclass]->method_table[i];
1507 BUG_ON(*method);
1508 /* Allocate method table for this OUI */
1509 if ((ret = allocate_method_table(method)))
1510 goto error3;
1511 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1512 mad_reg_req->oui, 3);
1513 goto check_in_use;
1514 }
1515 }
7ef5d4b0 1516 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1517 goto error3;
1518
1519check_in_use:
1520 /* Now, make sure methods are not already in use */
1521 if (method_in_use(method, mad_reg_req))
1522 goto error4;
1523
1524 /* Finally, add in methods being registered */
19b629f5 1525 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1da177e4 1526 (*method)->agent[i] = agent_priv;
19b629f5 1527
1528 return 0;
1529
1530error4:
1531 /* Remove any methods for this mad agent */
1532 remove_methods_mad_agent(*method, agent_priv);
1533 /* Now, check to see if there are any methods in use */
1534 if (!check_method_table(*method)) {
1535 /* If not, release management method table */
1536 kfree(*method);
1537 *method = NULL;
1538 }
1539 ret = -EINVAL;
1540error3:
1541 if (vendor_class) {
1542 (*vendor_table)->vendor_class[vclass] = NULL;
1543 kfree(vendor_class);
1544 }
1545error2:
1546 if (vendor) {
1547 *vendor_table = NULL;
1548 kfree(vendor);
1549 }
1550error1:
1551 return ret;
1552}
1553
1554static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1555{
1556 struct ib_mad_port_private *port_priv;
1557 struct ib_mad_mgmt_class_table *class;
1558 struct ib_mad_mgmt_method_table *method;
1559 struct ib_mad_mgmt_vendor_class_table *vendor;
1560 struct ib_mad_mgmt_vendor_class *vendor_class;
1561 int index;
1562 u8 mgmt_class;
1563
1564 /*
1565 * Was MAD registration request supplied
1566 * with original registration ?
1567 */
1568 if (!agent_priv->reg_req) {
1569 goto out;
1570 }
1571
1572 port_priv = agent_priv->qp_info->port_priv;
1573 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1574 class = port_priv->version[
1575 agent_priv->reg_req->mgmt_class_version].class;
1576 if (!class)
1577 goto vendor_check;
1578
1579 method = class->method_table[mgmt_class];
1580 if (method) {
1581 /* Remove any methods for this mad agent */
1582 remove_methods_mad_agent(method, agent_priv);
1583 /* Now, check to see if there are any methods still in use */
1584 if (!check_method_table(method)) {
1585 /* If not, release management method table */
1586 kfree(method);
1587 class->method_table[mgmt_class] = NULL;
1588 /* Any management classes left ? */
1589 if (!check_class_table(class)) {
1590 /* If not, release management class table */
1591 kfree(class);
1592 port_priv->version[
1593 agent_priv->reg_req->
1594 mgmt_class_version].class = NULL;
1595 }
1596 }
1597 }
1598
1599vendor_check:
1600 if (!is_vendor_class(mgmt_class))
1601 goto out;
1602
1603 /* normalize mgmt_class to vendor range 2 */
1604 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1605 vendor = port_priv->version[
1606 agent_priv->reg_req->mgmt_class_version].vendor;
1607
1608 if (!vendor)
1609 goto out;
1610
1611 vendor_class = vendor->vendor_class[mgmt_class];
1612 if (vendor_class) {
1613 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1614 if (index < 0)
1615 goto out;
1616 method = vendor_class->method_table[index];
1617 if (method) {
1618 /* Remove any methods for this mad agent */
1619 remove_methods_mad_agent(method, agent_priv);
1620 /*
1621 * Now, check to see if there are
1622 * any methods still in use
1623 */
1624 if (!check_method_table(method)) {
1625 /* If not, release management method table */
1626 kfree(method);
1627 vendor_class->method_table[index] = NULL;
1628 memset(vendor_class->oui[index], 0, 3);
1629 /* Any OUIs left ? */
1630 if (!check_vendor_class(vendor_class)) {
1631 /* If not, release vendor class table */
1632 kfree(vendor_class);
1633 vendor->vendor_class[mgmt_class] = NULL;
1634 /* Any other vendor classes left ? */
1635 if (!check_vendor_table(vendor)) {
1636 kfree(vendor);
1637 port_priv->version[
1638 agent_priv->reg_req->
1639 mgmt_class_version].
1640 vendor = NULL;
1641 }
1642 }
1643 }
1644 }
1645 }
1646
1647out:
1648 return;
1649}
1650
1651static struct ib_mad_agent_private *
1652find_mad_agent(struct ib_mad_port_private *port_priv,
d94bd266 1653 const struct ib_mad_hdr *mad_hdr)
1654{
1655 struct ib_mad_agent_private *mad_agent = NULL;
1656 unsigned long flags;
1657
1658 spin_lock_irqsave(&port_priv->reg_lock, flags);
d94bd266 1659 if (ib_response_mad(mad_hdr)) {
1660 u32 hi_tid;
1661 struct ib_mad_agent_private *entry;
1662
1663 /*
1664 * Routing is based on high 32 bits of transaction ID
1665 * of MAD.
1666 */
d94bd266 1667 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
34816ad9 1668 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1669 if (entry->agent.hi_tid == hi_tid) {
1670 mad_agent = entry;
1671 break;
1672 }
1673 }
1674 } else {
1675 struct ib_mad_mgmt_class_table *class;
1676 struct ib_mad_mgmt_method_table *method;
1677 struct ib_mad_mgmt_vendor_class_table *vendor;
1678 struct ib_mad_mgmt_vendor_class *vendor_class;
d94bd266 1679 const struct ib_vendor_mad *vendor_mad;
1680 int index;
1681
1682 /*
1683 * Routing is based on version, class, and method
1684 * For "newer" vendor MADs, also based on OUI
1685 */
d94bd266 1686 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1da177e4 1687 goto out;
d94bd266 1688 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1da177e4 1689 class = port_priv->version[
d94bd266 1690 mad_hdr->class_version].class;
1691 if (!class)
1692 goto out;
d94bd266 1693 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1694 IB_MGMT_MAX_METHODS)
1695 goto out;
1da177e4 1696 method = class->method_table[convert_mgmt_class(
d94bd266 1697 mad_hdr->mgmt_class)];
1da177e4 1698 if (method)
d94bd266 1699 mad_agent = method->agent[mad_hdr->method &
1700 ~IB_MGMT_METHOD_RESP];
1701 } else {
1702 vendor = port_priv->version[
d94bd266 1703 mad_hdr->class_version].vendor;
1704 if (!vendor)
1705 goto out;
1706 vendor_class = vendor->vendor_class[vendor_class_index(
d94bd266 1707 mad_hdr->mgmt_class)];
1da177e4
LT
1708 if (!vendor_class)
1709 goto out;
1710 /* Find matching OUI */
d94bd266 1711 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1da177e4
LT
1712 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1713 if (index == -1)
1714 goto out;
1715 method = vendor_class->method_table[index];
1716 if (method) {
d94bd266 1717 mad_agent = method->agent[mad_hdr->method &
1da177e4
LT
1718 ~IB_MGMT_METHOD_RESP];
1719 }
1720 }
1721 }
1722
1723 if (mad_agent) {
1724 if (mad_agent->agent.recv_handler)
1725 atomic_inc(&mad_agent->refcount);
1726 else {
7ef5d4b0
IW
1727 dev_notice(&port_priv->device->dev,
1728 "No receive handler for client %p on port %d\n",
1729 &mad_agent->agent, port_priv->port_num);
1da177e4
LT
1730 mad_agent = NULL;
1731 }
1732 }
1733out:
1734 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1735
1736 return mad_agent;
1737}
1738
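/*
 * Illustrative sketch, not part of the original file: how find_mad_agent()
 * routes a response back to the agent that sent the request.  When a request
 * is built, the agent's hi_tid goes into the upper 32 bits of the
 * (host-order) transaction ID and the lower 32 bits are caller-chosen.
 * The helper name below is hypothetical.
 */
static u64 example_build_tid(u32 agent_hi_tid, u32 user_tid)
{
	/* upper half identifies the sending agent, lower half is the caller's */
	return ((u64)agent_hi_tid << 32) | user_tid;
}
/*
 * A response carrying cpu_to_be64(example_build_tid(agent->hi_tid, 7)) in
 * mad_hdr->tid is matched above by
 *	be64_to_cpu(mad_hdr->tid) >> 32 == entry->agent.hi_tid
 * so only the upper half has to survive the round trip unchanged.
 */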
77f60833 1739static int validate_mad(const struct ib_mad_hdr *mad_hdr, u32 qp_num)
1da177e4
LT
1740{
1741 int valid = 0;
1742
1743 /* Make sure MAD base version is understood */
77f60833 1744 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION) {
7ef5d4b0 1745 pr_err("MAD received with unsupported base version %d\n",
77f60833 1746 mad_hdr->base_version);
1da177e4
LT
1747 goto out;
1748 }
1749
1750 /* Filter SMI packets sent to other than QP0 */
77f60833
IW
1751 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1752 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1da177e4
LT
1753 if (qp_num == 0)
1754 valid = 1;
1755 } else {
1756 /* Filter GSI packets sent to QP0 */
1757 if (qp_num != 0)
1758 valid = 1;
1759 }
1760
1761out:
1762 return valid;
1763}
1764
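/*
 * Recap of the check above: SMP classes are accepted only on the SMI QP,
 * everything else only on the GSI QP.
 *
 *	mgmt_class				qp_num == 0	qp_num != 0
 *	SUBN_LID_ROUTED / SUBN_DIRECTED_ROUTE	valid		dropped
 *	any other class				dropped		valid
 *
 * So, for example, a performance-management GET arriving on QP0 fails
 * validate_mad() and the receive is dropped in ib_mad_recv_done_handler().
 */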
f766c58f
IW
1765static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1766 const struct ib_mad_hdr *mad_hdr)
fa619a77
HR
1767{
1768 struct ib_rmpp_mad *rmpp_mad;
1769
1770 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1771 return !mad_agent_priv->agent.rmpp_version ||
1471cb6c 1772 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
fa619a77
HR
1773 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1774 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1775 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1776}
1777
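/*
 * In other words, is_rmpp_data_mad() is true when the MAD is not an active
 * kernel-RMPP control segment: the agent does no kernel RMPP at all, the
 * RMPP_ACTIVE flag is clear, or the segment is a DATA segment rather than
 * an ACK/STOP/ABORT.
 */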
8bf4b30c
IW
1778static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1779 const struct ib_mad_recv_wc *rwc)
fa9656bb 1780{
8bf4b30c 1781 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
fa9656bb
JM
1782 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1783}
1784
f766c58f
IW
1785static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1786 const struct ib_mad_send_wr_private *wr,
1787 const struct ib_mad_recv_wc *rwc )
fa9656bb
JM
1788{
1789 struct ib_ah_attr attr;
1790 u8 send_resp, rcv_resp;
9874e746
JM
1791 union ib_gid sgid;
1792 struct ib_device *device = mad_agent_priv->agent.device;
1793 u8 port_num = mad_agent_priv->agent.port_num;
1794 u8 lmc;
fa9656bb 1795
96909308
IW
1796 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1797 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
fa9656bb 1798
fa9656bb
JM
1799 if (send_resp == rcv_resp)
1800 /* both requests, or both responses. GIDs different */
1801 return 0;
1802
1803 if (ib_query_ah(wr->send_buf.ah, &attr))
1804 /* Assume not equal, to avoid false positives. */
1805 return 0;
1806
9874e746
JM
1807 if (!!(attr.ah_flags & IB_AH_GRH) !=
1808 !!(rwc->wc->wc_flags & IB_WC_GRH))
fa9656bb
JM
1809 /* one has GID, other does not. Assume different */
1810 return 0;
9874e746
JM
1811
1812 if (!send_resp && rcv_resp) {
1813 /* is request/response. */
1814 if (!(attr.ah_flags & IB_AH_GRH)) {
1815 if (ib_get_cached_lmc(device, port_num, &lmc))
1816 return 0;
1817 return (!lmc || !((attr.src_path_bits ^
1818 rwc->wc->dlid_path_bits) &
1819 ((1 << lmc) - 1)));
1820 } else {
1821 if (ib_get_cached_gid(device, port_num,
1822 attr.grh.sgid_index, &sgid))
1823 return 0;
1824 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1825 16);
1826 }
1827 }
1828
1829 if (!(attr.ah_flags & IB_AH_GRH))
1830 return attr.dlid == rwc->wc->slid;
1831 else
1832 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1833 16);
1834}
1835
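/*
 * Worked example for the LMC test above (illustrative numbers, not from the
 * original file): with lmc == 2 the low two bits of the path bits pick one
 * of the four LIDs assigned to the port, so
 *
 *	attr.src_path_bits      = 0x5	(binary 101)
 *	rwc->wc->dlid_path_bits = 0x1	(binary 001)
 *
 * gives ((0x5 ^ 0x1) & ((1 << 2) - 1)) == 0: both sides used the same LID
 * within the LMC range and rcv_has_same_gid() reports a match.
 */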
1836static inline int is_direct(u8 class)
1837{
1838 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
fa9656bb 1839}
9874e746 1840
fa619a77 1841struct ib_mad_send_wr_private*
f766c58f
IW
1842ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1843 const struct ib_mad_recv_wc *wc)
1da177e4 1844{
9874e746 1845 struct ib_mad_send_wr_private *wr;
83a1d228 1846 const struct ib_mad_hdr *mad_hdr;
fa9656bb 1847
83a1d228 1848 mad_hdr = &wc->recv_buf.mad->mad_hdr;
9874e746
JM
1849
1850 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
83a1d228 1851 if ((wr->tid == mad_hdr->tid) &&
9874e746
JM
1852 rcv_has_same_class(wr, wc) &&
1853 /*
1854 * Don't check GID for direct routed MADs.
1855 * These might have permissive LIDs.
1856 */
83a1d228 1857 (is_direct(mad_hdr->mgmt_class) ||
9874e746 1858 rcv_has_same_gid(mad_agent_priv, wr, wc)))
39798695 1859 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1da177e4
LT
1860 }
1861
1862 /*
1863 * It's possible to receive the response before we've
1864 * been notified that the send has completed
1865 */
9874e746 1866 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
c597eee5 1867 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
83a1d228 1868 wr->tid == mad_hdr->tid &&
9874e746
JM
1869 wr->timeout &&
1870 rcv_has_same_class(wr, wc) &&
1871 /*
1872 * Don't check GID for direct routed MADs.
1873 * These might have permissive LIDs.
1874 */
83a1d228 1875 (is_direct(mad_hdr->mgmt_class) ||
9874e746 1876 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1da177e4 1877 /* Verify request has not been canceled */
9874e746 1878 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1da177e4
LT
1879 }
1880 return NULL;
1881}
1882
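/*
 * Matching order above: the wait_list holds sends whose completion has
 * already been seen and that are now waiting on a response, while the
 * send_list covers the race where the response arrives before the send
 * completion.  In both cases a candidate must carry the same TID and
 * management class and, unless it is a directed-route SMP (which may use
 * permissive LIDs), the same GID/LID as the request, and must not have
 * been canceled (wr->status == IB_WC_SUCCESS).
 */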
fa619a77 1883void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
6a0c435e
HR
1884{
1885 mad_send_wr->timeout = 0;
179e0917
AM
1886 if (mad_send_wr->refcount == 1)
1887 list_move_tail(&mad_send_wr->agent_list,
6a0c435e 1888 &mad_send_wr->mad_agent_priv->done_list);
6a0c435e
HR
1889}
1890
1da177e4 1891static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
4a0754fa 1892 struct ib_mad_recv_wc *mad_recv_wc)
1da177e4
LT
1893{
1894 struct ib_mad_send_wr_private *mad_send_wr;
1895 struct ib_mad_send_wc mad_send_wc;
1896 unsigned long flags;
1897
fa619a77
HR
1898 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1899 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1471cb6c 1900 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
fa619a77
HR
1901 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1902 mad_recv_wc);
1903 if (!mad_recv_wc) {
1b52fa98 1904 deref_mad_agent(mad_agent_priv);
fa619a77
HR
1905 return;
1906 }
1907 }
1908
1da177e4 1909 /* Complete corresponding request */
96909308 1910 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1da177e4 1911 spin_lock_irqsave(&mad_agent_priv->lock, flags);
fa9656bb 1912 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1da177e4
LT
1913 if (!mad_send_wr) {
1914 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1471cb6c
IW
1915 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1916 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1917 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1918 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1919 /* user rmpp is in effect
1920 * and this is an active RMPP MAD
1921 */
1922 mad_recv_wc->wc->wr_id = 0;
1923 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1924 mad_recv_wc);
1925 atomic_dec(&mad_agent_priv->refcount);
1926 } else {
1927 /* not user rmpp, revert to normal behavior and
1928 * drop the mad */
1929 ib_free_recv_mad(mad_recv_wc);
1930 deref_mad_agent(mad_agent_priv);
1931 return;
1932 }
1933 } else {
1934 ib_mark_mad_done(mad_send_wr);
1935 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1da177e4 1936
1471cb6c
IW
1937 /* Defined behavior is to complete response before request */
1938 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1939 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1940 mad_recv_wc);
1941 atomic_dec(&mad_agent_priv->refcount);
1da177e4 1942
1471cb6c
IW
1943 mad_send_wc.status = IB_WC_SUCCESS;
1944 mad_send_wc.vendor_err = 0;
1945 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1946 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1947 }
1da177e4 1948 } else {
4a0754fa
HR
1949 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1950 mad_recv_wc);
1b52fa98 1951 deref_mad_agent(mad_agent_priv);
1da177e4
LT
1952 }
1953}
1954
e11ae8aa
IW
1955static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
1956 const struct ib_mad_qp_info *qp_info,
1957 const struct ib_wc *wc,
1958 int port_num,
1959 struct ib_mad_private *recv,
1960 struct ib_mad_private *response)
1961{
1962 enum smi_forward_action retsmi;
c9082e51 1963 struct ib_smp *smp = (struct ib_smp *)recv->mad;
e11ae8aa 1964
c9082e51 1965 if (smi_handle_dr_smp_recv(smp,
e11ae8aa
IW
1966 port_priv->device->node_type,
1967 port_num,
1968 port_priv->device->phys_port_cnt) ==
1969 IB_SMI_DISCARD)
1970 return IB_SMI_DISCARD;
1971
c9082e51 1972 retsmi = smi_check_forward_dr_smp(smp);
e11ae8aa
IW
1973 if (retsmi == IB_SMI_LOCAL)
1974 return IB_SMI_HANDLE;
1975
1976 if (retsmi == IB_SMI_SEND) { /* don't forward */
c9082e51 1977 if (smi_handle_dr_smp_send(smp,
e11ae8aa
IW
1978 port_priv->device->node_type,
1979 port_num) == IB_SMI_DISCARD)
1980 return IB_SMI_DISCARD;
1981
c9082e51 1982 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
e11ae8aa
IW
1983 return IB_SMI_DISCARD;
1984 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1985 /* forward case for switches */
c9082e51 1986 memcpy(response, recv, mad_priv_size(response));
e11ae8aa 1987 response->header.recv_wc.wc = &response->header.wc;
c9082e51 1988 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
e11ae8aa
IW
1989 response->header.recv_wc.recv_buf.grh = &response->grh;
1990
c9082e51 1991 agent_send_response((const struct ib_mad_hdr *)response->mad,
e11ae8aa
IW
1992 &response->grh, wc,
1993 port_priv->device,
c9082e51
IW
1994 smi_get_fwd_port(smp),
1995 qp_info->qp->qp_num,
1996 response->mad_size);
e11ae8aa
IW
1997
1998 return IB_SMI_DISCARD;
1999 }
2000 return IB_SMI_HANDLE;
2001}
2002
c9082e51 2003static bool generate_unmatched_resp(const struct ib_mad_private *recv,
0b307043
ST
2004 struct ib_mad_private *response)
2005{
c9082e51
IW
2006 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2007 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2008
2009 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2010 recv_hdr->method == IB_MGMT_METHOD_SET) {
2011 memcpy(response, recv, mad_priv_size(response));
0b307043 2012 response->header.recv_wc.wc = &response->header.wc;
c9082e51 2013 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
0b307043 2014 response->header.recv_wc.recv_buf.grh = &response->grh;
c9082e51
IW
2015 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2016 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2017 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2018 resp_hdr->status |= IB_SMP_DIRECTION;
0b307043
ST
2019
2020 return true;
2021 } else {
2022 return false;
2023 }
2024}
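/*
 * Example of what the fallback above produces: an unhandled GET or SET to an
 * unregistered class is bounced back as a GET_RESP whose status reports
 * "unsupported method/attribute", with the direction bit set for
 * directed-route SMPs to mark the reply as travelling back toward the
 * requester.  Any other method is not answered and the MAD is dropped.
 */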
1da177e4
LT
2025static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
2026 struct ib_wc *wc)
2027{
2028 struct ib_mad_qp_info *qp_info;
2029 struct ib_mad_private_header *mad_priv_hdr;
445d6807 2030 struct ib_mad_private *recv, *response = NULL;
1da177e4
LT
2031 struct ib_mad_list_head *mad_list;
2032 struct ib_mad_agent_private *mad_agent;
1bae4dbf 2033 int port_num;
a9e74323 2034 int ret = IB_MAD_RESULT_SUCCESS;
4cd7c947
IW
2035 size_t mad_size;
2036 u16 resp_mad_pkey_index = 0;
1da177e4 2037
1da177e4
LT
2038 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2039 qp_info = mad_list->mad_queue->qp_info;
2040 dequeue_mad(mad_list);
2041
2042 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2043 mad_list);
2044 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1527106f
RC
2045 ib_dma_unmap_single(port_priv->device,
2046 recv->header.mapping,
c9082e51 2047 mad_priv_dma_size(recv),
1527106f 2048 DMA_FROM_DEVICE);
1da177e4
LT
2049
2050 /* Setup MAD receive work completion from "normal" work completion */
24239aff
SH
2051 recv->header.wc = *wc;
2052 recv->header.recv_wc.wc = &recv->header.wc;
1da177e4 2053 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
c9082e51 2054 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
1da177e4
LT
2055 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2056
2057 if (atomic_read(&qp_info->snoop_count))
2058 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2059
2060 /* Validate MAD */
c9082e51 2061 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info->qp->qp_num))
1da177e4
LT
2062 goto out;
2063
4cd7c947
IW
2064 mad_size = recv->mad_size;
2065 response = alloc_mad_private(mad_size, GFP_KERNEL);
445d6807 2066 if (!response) {
7ef5d4b0
IW
2067 dev_err(&port_priv->device->dev,
2068 "ib_mad_recv_done_handler no memory for response buffer\n");
445d6807
HR
2069 goto out;
2070 }
2071
1bae4dbf
HR
2072 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
2073 port_num = wc->port_num;
2074 else
2075 port_num = port_priv->port_num;
2076
c9082e51 2077 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
1da177e4 2078 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
e11ae8aa
IW
2079 if (handle_ib_smi(port_priv, qp_info, wc, port_num, recv,
2080 response)
2081 == IB_SMI_DISCARD)
1da177e4 2082 goto out;
1da177e4
LT
2083 }
2084
1da177e4
LT
2085 /* Give driver "right of first refusal" on incoming MAD */
2086 if (port_priv->device->process_mad) {
1da177e4
LT
2087 ret = port_priv->device->process_mad(port_priv->device, 0,
2088 port_priv->port_num,
2089 wc, &recv->grh,
4cd7c947
IW
2090 (const struct ib_mad_hdr *)recv->mad,
2091 recv->mad_size,
2092 (struct ib_mad_hdr *)response->mad,
2093 &mad_size, &resp_mad_pkey_index);
1da177e4
LT
2094 if (ret & IB_MAD_RESULT_SUCCESS) {
2095 if (ret & IB_MAD_RESULT_CONSUMED)
2096 goto out;
2097 if (ret & IB_MAD_RESULT_REPLY) {
c9082e51 2098 agent_send_response((const struct ib_mad_hdr *)response->mad,
34816ad9
SH
2099 &recv->grh, wc,
2100 port_priv->device,
1bae4dbf 2101 port_num,
c9082e51
IW
2102 qp_info->qp->qp_num,
2103 response->mad_size);
1da177e4
LT
2104 goto out;
2105 }
2106 }
2107 }
2108
c9082e51 2109 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
1da177e4 2110 if (mad_agent) {
4a0754fa 2111 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1da177e4
LT
2112 /*
2113 * recv is freed up in error cases in ib_mad_complete_recv
2114 * or via recv_handler in ib_mad_complete_recv()
2115 */
2116 recv = NULL;
a9e74323
JM
2117 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2118 generate_unmatched_resp(recv, response)) {
c9082e51
IW
2119 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2120 port_priv->device, port_num,
2121 qp_info->qp->qp_num, response->mad_size);
1da177e4
LT
2122 }
2123
2124out:
2125 /* Post another receive request for this QP */
2126 if (response) {
2127 ib_mad_post_receive_mads(qp_info, response);
c9082e51 2128 kfree(recv);
1da177e4
LT
2129 } else
2130 ib_mad_post_receive_mads(qp_info, recv);
2131}
2132
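/*
 * Receive path summary for the handler above, in order: unmap the buffer,
 * rebuild the ib_mad_recv_wc, snoop, validate_mad(), allocate a response
 * buffer, let handle_ib_smi() deal with directed-route SMPs, offer the MAD
 * to the driver's process_mad() ("right of first refusal"), then hand it to
 * the matching agent via ib_mad_complete_recv(); if nothing claimed it, try
 * generate_unmatched_resp().  Whichever of recv/response is left over is
 * recycled through ib_mad_post_receive_mads() or freed.
 */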
2133static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2134{
2135 struct ib_mad_send_wr_private *mad_send_wr;
2136 unsigned long delay;
2137
2138 if (list_empty(&mad_agent_priv->wait_list)) {
136b5721 2139 cancel_delayed_work(&mad_agent_priv->timed_work);
1da177e4
LT
2140 } else {
2141 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2142 struct ib_mad_send_wr_private,
2143 agent_list);
2144
2145 if (time_after(mad_agent_priv->timeout,
2146 mad_send_wr->timeout)) {
2147 mad_agent_priv->timeout = mad_send_wr->timeout;
1da177e4
LT
2148 delay = mad_send_wr->timeout - jiffies;
2149 if ((long)delay <= 0)
2150 delay = 1;
e7c2f967
TH
2151 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2152 &mad_agent_priv->timed_work, delay);
1da177e4
LT
2153 }
2154 }
2155}
2156
d760ce8f 2157static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1da177e4 2158{
d760ce8f 2159 struct ib_mad_agent_private *mad_agent_priv;
1da177e4
LT
2160 struct ib_mad_send_wr_private *temp_mad_send_wr;
2161 struct list_head *list_item;
2162 unsigned long delay;
2163
d760ce8f 2164 mad_agent_priv = mad_send_wr->mad_agent_priv;
1da177e4
LT
2165 list_del(&mad_send_wr->agent_list);
2166
2167 delay = mad_send_wr->timeout;
2168 mad_send_wr->timeout += jiffies;
2169
29bb33dd
HR
2170 if (delay) {
2171 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2172 temp_mad_send_wr = list_entry(list_item,
2173 struct ib_mad_send_wr_private,
2174 agent_list);
2175 if (time_after(mad_send_wr->timeout,
2176 temp_mad_send_wr->timeout))
2177 break;
2178 }
1da177e4 2179 }
29bb33dd
HR
2180 else
2181 list_item = &mad_agent_priv->wait_list;
1da177e4
LT
2182 list_add(&mad_send_wr->agent_list, list_item);
2183
2184 /* Reschedule a work item if we have a shorter timeout */
e7c2f967
TH
2185 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2186 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2187 &mad_agent_priv->timed_work, delay);
1da177e4
LT
2188}
2189
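/*
 * Note on the insertion above: the wait_list is kept sorted by absolute
 * timeout with the earliest expiry at the head, so timeout_sends() only
 * ever has to look at the front of the list, and the delayed work is
 * rescheduled only when the new entry becomes that head.
 */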
03b61ad2
HR
2190void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2191 int timeout_ms)
2192{
2193 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2194 wait_for_response(mad_send_wr);
2195}
2196
1da177e4
LT
2197/*
2198 * Process a send work completion
2199 */
fa619a77
HR
2200void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2201 struct ib_mad_send_wc *mad_send_wc)
1da177e4
LT
2202{
2203 struct ib_mad_agent_private *mad_agent_priv;
2204 unsigned long flags;
fa619a77 2205 int ret;
1da177e4 2206
d760ce8f 2207 mad_agent_priv = mad_send_wr->mad_agent_priv;
1da177e4 2208 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1471cb6c 2209 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
fa619a77
HR
2210 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2211 if (ret == IB_RMPP_RESULT_CONSUMED)
2212 goto done;
2213 } else
2214 ret = IB_RMPP_RESULT_UNHANDLED;
2215
1da177e4
LT
2216 if (mad_send_wc->status != IB_WC_SUCCESS &&
2217 mad_send_wr->status == IB_WC_SUCCESS) {
2218 mad_send_wr->status = mad_send_wc->status;
2219 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2220 }
2221
2222 if (--mad_send_wr->refcount > 0) {
2223 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2224 mad_send_wr->status == IB_WC_SUCCESS) {
d760ce8f 2225 wait_for_response(mad_send_wr);
1da177e4 2226 }
fa619a77 2227 goto done;
1da177e4
LT
2228 }
2229
2230 /* Remove send from MAD agent and notify client of completion */
2231 list_del(&mad_send_wr->agent_list);
2232 adjust_timeout(mad_agent_priv);
2233 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2234
2235 if (mad_send_wr->status != IB_WC_SUCCESS )
2236 mad_send_wc->status = mad_send_wr->status;
34816ad9
SH
2237 if (ret == IB_RMPP_RESULT_INTERNAL)
2238 ib_rmpp_send_handler(mad_send_wc);
2239 else
fa619a77
HR
2240 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2241 mad_send_wc);
1da177e4
LT
2242
2243 /* Release reference on agent taken when sending */
1b52fa98 2244 deref_mad_agent(mad_agent_priv);
fa619a77
HR
2245 return;
2246done:
2247 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1da177e4
LT
2248}
2249
2250static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2251 struct ib_wc *wc)
2252{
2253 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2254 struct ib_mad_list_head *mad_list;
2255 struct ib_mad_qp_info *qp_info;
2256 struct ib_mad_queue *send_queue;
2257 struct ib_send_wr *bad_send_wr;
34816ad9 2258 struct ib_mad_send_wc mad_send_wc;
1da177e4
LT
2259 unsigned long flags;
2260 int ret;
2261
2262 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2263 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2264 mad_list);
2265 send_queue = mad_list->mad_queue;
2266 qp_info = send_queue->qp_info;
2267
2268retry:
1527106f
RC
2269 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2270 mad_send_wr->header_mapping,
2271 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2272 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2273 mad_send_wr->payload_mapping,
2274 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
1da177e4
LT
2275 queued_send_wr = NULL;
2276 spin_lock_irqsave(&send_queue->lock, flags);
2277 list_del(&mad_list->list);
2278
2279 /* Move queued send to the send queue */
2280 if (send_queue->count-- > send_queue->max_active) {
2281 mad_list = container_of(qp_info->overflow_list.next,
2282 struct ib_mad_list_head, list);
2283 queued_send_wr = container_of(mad_list,
2284 struct ib_mad_send_wr_private,
2285 mad_list);
179e0917 2286 list_move_tail(&mad_list->list, &send_queue->list);
1da177e4
LT
2287 }
2288 spin_unlock_irqrestore(&send_queue->lock, flags);
2289
34816ad9
SH
2290 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2291 mad_send_wc.status = wc->status;
2292 mad_send_wc.vendor_err = wc->vendor_err;
1da177e4 2293 if (atomic_read(&qp_info->snoop_count))
34816ad9 2294 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
1da177e4 2295 IB_MAD_SNOOP_SEND_COMPLETIONS);
34816ad9 2296 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1da177e4
LT
2297
2298 if (queued_send_wr) {
2299 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
34816ad9 2300 &bad_send_wr);
1da177e4 2301 if (ret) {
7ef5d4b0
IW
2302 dev_err(&port_priv->device->dev,
2303 "ib_post_send failed: %d\n", ret);
1da177e4
LT
2304 mad_send_wr = queued_send_wr;
2305 wc->status = IB_WC_LOC_QP_OP_ERR;
2306 goto retry;
2307 }
2308 }
2309}
2310
2311static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2312{
2313 struct ib_mad_send_wr_private *mad_send_wr;
2314 struct ib_mad_list_head *mad_list;
2315 unsigned long flags;
2316
2317 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2318 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2319 mad_send_wr = container_of(mad_list,
2320 struct ib_mad_send_wr_private,
2321 mad_list);
2322 mad_send_wr->retry = 1;
2323 }
2324 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2325}
2326
2327static void mad_error_handler(struct ib_mad_port_private *port_priv,
2328 struct ib_wc *wc)
2329{
2330 struct ib_mad_list_head *mad_list;
2331 struct ib_mad_qp_info *qp_info;
2332 struct ib_mad_send_wr_private *mad_send_wr;
2333 int ret;
2334
2335 /* Determine if failure was a send or receive */
2336 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2337 qp_info = mad_list->mad_queue->qp_info;
2338 if (mad_list->mad_queue == &qp_info->recv_queue)
2339 /*
2340 * Receive errors indicate that the QP has entered the error
2341 * state - error handling/shutdown code will cleanup
2342 */
2343 return;
2344
2345 /*
2346 * Send errors will transition the QP to SQE - move
2347 * QP to RTS and repost flushed work requests
2348 */
2349 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2350 mad_list);
2351 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2352 if (mad_send_wr->retry) {
2353 /* Repost send */
2354 struct ib_send_wr *bad_send_wr;
2355
2356 mad_send_wr->retry = 0;
2357 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2358 &bad_send_wr);
2359 if (ret)
2360 ib_mad_send_done_handler(port_priv, wc);
2361 } else
2362 ib_mad_send_done_handler(port_priv, wc);
2363 } else {
2364 struct ib_qp_attr *attr;
2365
2366 /* Transition QP to RTS and fail offending send */
2367 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2368 if (attr) {
2369 attr->qp_state = IB_QPS_RTS;
2370 attr->cur_qp_state = IB_QPS_SQE;
2371 ret = ib_modify_qp(qp_info->qp, attr,
2372 IB_QP_STATE | IB_QP_CUR_STATE);
2373 kfree(attr);
2374 if (ret)
7ef5d4b0
IW
2375 dev_err(&port_priv->device->dev,
2376 "mad_error_handler - ib_modify_qp to RTS : %d\n",
2377 ret);
1da177e4
LT
2378 else
2379 mark_sends_for_retry(qp_info);
2380 }
2381 ib_mad_send_done_handler(port_priv, wc);
2382 }
2383}
2384
2385/*
2386 * IB MAD completion callback
2387 */
c4028958 2388static void ib_mad_completion_handler(struct work_struct *work)
1da177e4
LT
2389{
2390 struct ib_mad_port_private *port_priv;
2391 struct ib_wc wc;
2392
c4028958 2393 port_priv = container_of(work, struct ib_mad_port_private, work);
1da177e4
LT
2394 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2395
2396 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2397 if (wc.status == IB_WC_SUCCESS) {
2398 switch (wc.opcode) {
2399 case IB_WC_SEND:
2400 ib_mad_send_done_handler(port_priv, &wc);
2401 break;
2402 case IB_WC_RECV:
2403 ib_mad_recv_done_handler(port_priv, &wc);
2404 break;
2405 default:
2406 BUG_ON(1);
2407 break;
2408 }
2409 } else
2410 mad_error_handler(port_priv, &wc);
2411 }
2412}
2413
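/*
 * The handler above runs from port_priv->wq: the CQ callback
 * (ib_mad_thread_completion_handler) only queues port_priv->work, and this
 * work item re-arms completion notification and then drains the CQ one work
 * completion at a time, dispatching to the send or receive done handler or,
 * on error, to mad_error_handler().
 */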
2414static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2415{
2416 unsigned long flags;
2417 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2418 struct ib_mad_send_wc mad_send_wc;
2419 struct list_head cancel_list;
2420
2421 INIT_LIST_HEAD(&cancel_list);
2422
2423 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2424 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2425 &mad_agent_priv->send_list, agent_list) {
2426 if (mad_send_wr->status == IB_WC_SUCCESS) {
3cd96564 2427 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
1da177e4
LT
2428 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2429 }
2430 }
2431
2432 /* Empty wait list to prevent receives from finding a request */
2433 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2434 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2435
2436 /* Report all cancelled requests */
2437 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2438 mad_send_wc.vendor_err = 0;
2439
2440 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2441 &cancel_list, agent_list) {
34816ad9
SH
2442 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2443 list_del(&mad_send_wr->agent_list);
1da177e4
LT
2444 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2445 &mad_send_wc);
1da177e4
LT
2446 atomic_dec(&mad_agent_priv->refcount);
2447 }
2448}
2449
2450static struct ib_mad_send_wr_private*
34816ad9
SH
2451find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2452 struct ib_mad_send_buf *send_buf)
1da177e4
LT
2453{
2454 struct ib_mad_send_wr_private *mad_send_wr;
2455
2456 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2457 agent_list) {
34816ad9 2458 if (&mad_send_wr->send_buf == send_buf)
1da177e4
LT
2459 return mad_send_wr;
2460 }
2461
2462 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2463 agent_list) {
c597eee5
IW
2464 if (is_rmpp_data_mad(mad_agent_priv,
2465 mad_send_wr->send_buf.mad) &&
34816ad9 2466 &mad_send_wr->send_buf == send_buf)
1da177e4
LT
2467 return mad_send_wr;
2468 }
2469 return NULL;
2470}
2471
34816ad9
SH
2472int ib_modify_mad(struct ib_mad_agent *mad_agent,
2473 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
1da177e4
LT
2474{
2475 struct ib_mad_agent_private *mad_agent_priv;
2476 struct ib_mad_send_wr_private *mad_send_wr;
2477 unsigned long flags;
cabe3cbc 2478 int active;
1da177e4
LT
2479
2480 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2481 agent);
2482 spin_lock_irqsave(&mad_agent_priv->lock, flags);
34816ad9 2483 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
03b61ad2 2484 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
1da177e4 2485 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
03b61ad2 2486 return -EINVAL;
1da177e4
LT
2487 }
2488
cabe3cbc 2489 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
03b61ad2 2490 if (!timeout_ms) {
1da177e4 2491 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
03b61ad2 2492 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
1da177e4
LT
2493 }
2494
34816ad9 2495 mad_send_wr->send_buf.timeout_ms = timeout_ms;
cabe3cbc 2496 if (active)
03b61ad2
HR
2497 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2498 else
2499 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2500
1da177e4 2501 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
03b61ad2
HR
2502 return 0;
2503}
2504EXPORT_SYMBOL(ib_modify_mad);
1da177e4 2505
34816ad9
SH
2506void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2507 struct ib_mad_send_buf *send_buf)
03b61ad2 2508{
34816ad9 2509 ib_modify_mad(mad_agent, send_buf, 0);
1da177e4
LT
2510}
2511EXPORT_SYMBOL(ib_cancel_mad);
2512
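/*
 * Caller-side sketch, not part of this file: how a client uses the two
 * exports above on a send it previously posted.  "agent" and "send_buf" are
 * assumed to be what the caller got back from ib_register_mad_agent() and
 * ib_create_send_mad(); the function name is hypothetical.
 */
static void example_abandon_request(struct ib_mad_agent *agent,
				    struct ib_mad_send_buf *send_buf)
{
	/*
	 * Shorten the timeout to 100 ms; returns -EINVAL if the send has
	 * already completed or been canceled.
	 */
	if (ib_modify_mad(agent, send_buf, 100))
		return;

	/*
	 * Or give up entirely: the send then completes through the agent's
	 * send_handler with status IB_WC_WR_FLUSH_ERR.
	 */
	ib_cancel_mad(agent, send_buf);
}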
c4028958 2513static void local_completions(struct work_struct *work)
1da177e4
LT
2514{
2515 struct ib_mad_agent_private *mad_agent_priv;
2516 struct ib_mad_local_private *local;
2517 struct ib_mad_agent_private *recv_mad_agent;
2518 unsigned long flags;
1d9bc6d6 2519 int free_mad;
1da177e4
LT
2520 struct ib_wc wc;
2521 struct ib_mad_send_wc mad_send_wc;
2522
c4028958
DH
2523 mad_agent_priv =
2524 container_of(work, struct ib_mad_agent_private, local_work);
1da177e4
LT
2525
2526 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2527 while (!list_empty(&mad_agent_priv->local_list)) {
2528 local = list_entry(mad_agent_priv->local_list.next,
2529 struct ib_mad_local_private,
2530 completion_list);
37289efe 2531 list_del(&local->completion_list);
1da177e4 2532 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1d9bc6d6 2533 free_mad = 0;
1da177e4
LT
2534 if (local->mad_priv) {
2535 recv_mad_agent = local->recv_mad_agent;
2536 if (!recv_mad_agent) {
7ef5d4b0
IW
2537 dev_err(&mad_agent_priv->agent.device->dev,
2538 "No receive MAD agent for local completion\n");
1d9bc6d6 2539 free_mad = 1;
1da177e4
LT
2540 goto local_send_completion;
2541 }
2542
2543 /*
2544 * Defined behavior is to complete response
2545 * before request
2546 */
062dbb69
MT
2547 build_smp_wc(recv_mad_agent->agent.qp,
2548 (unsigned long) local->mad_send_wr,
97f52eb4 2549 be16_to_cpu(IB_LID_PERMISSIVE),
34816ad9 2550 0, recv_mad_agent->agent.port_num, &wc);
1da177e4
LT
2551
2552 local->mad_priv->header.recv_wc.wc = &wc;
2553 local->mad_priv->header.recv_wc.mad_len =
2554 sizeof(struct ib_mad);
fa619a77
HR
2555 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2556 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2557 &local->mad_priv->header.recv_wc.rmpp_list);
1da177e4
LT
2558 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2559 local->mad_priv->header.recv_wc.recv_buf.mad =
c9082e51 2560 (struct ib_mad *)local->mad_priv->mad;
1da177e4
LT
2561 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2562 snoop_recv(recv_mad_agent->qp_info,
2563 &local->mad_priv->header.recv_wc,
2564 IB_MAD_SNOOP_RECVS);
2565 recv_mad_agent->agent.recv_handler(
2566 &recv_mad_agent->agent,
2567 &local->mad_priv->header.recv_wc);
2568 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2569 atomic_dec(&recv_mad_agent->refcount);
2570 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2571 }
2572
2573local_send_completion:
2574 /* Complete send */
2575 mad_send_wc.status = IB_WC_SUCCESS;
2576 mad_send_wc.vendor_err = 0;
34816ad9 2577 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
1da177e4 2578 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
34816ad9
SH
2579 snoop_send(mad_agent_priv->qp_info,
2580 &local->mad_send_wr->send_buf,
2581 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
1da177e4
LT
2582 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2583 &mad_send_wc);
2584
2585 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1da177e4 2586 atomic_dec(&mad_agent_priv->refcount);
1d9bc6d6 2587 if (free_mad)
c9082e51 2588 kfree(local->mad_priv);
1da177e4
LT
2589 kfree(local);
2590 }
2591 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2592}
2593
f75b7a52
HR
2594static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2595{
2596 int ret;
2597
4fc8cd49 2598 if (!mad_send_wr->retries_left)
f75b7a52
HR
2599 return -ETIMEDOUT;
2600
4fc8cd49
SH
2601 mad_send_wr->retries_left--;
2602 mad_send_wr->send_buf.retries++;
2603
34816ad9 2604 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
f75b7a52 2605
1471cb6c 2606 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
fa619a77
HR
2607 ret = ib_retry_rmpp(mad_send_wr);
2608 switch (ret) {
2609 case IB_RMPP_RESULT_UNHANDLED:
2610 ret = ib_send_mad(mad_send_wr);
2611 break;
2612 case IB_RMPP_RESULT_CONSUMED:
2613 ret = 0;
2614 break;
2615 default:
2616 ret = -ECOMM;
2617 break;
2618 }
2619 } else
2620 ret = ib_send_mad(mad_send_wr);
f75b7a52
HR
2621
2622 if (!ret) {
2623 mad_send_wr->refcount++;
f75b7a52
HR
2624 list_add_tail(&mad_send_wr->agent_list,
2625 &mad_send_wr->mad_agent_priv->send_list);
2626 }
2627 return ret;
2628}
2629
c4028958 2630static void timeout_sends(struct work_struct *work)
1da177e4
LT
2631{
2632 struct ib_mad_agent_private *mad_agent_priv;
2633 struct ib_mad_send_wr_private *mad_send_wr;
2634 struct ib_mad_send_wc mad_send_wc;
2635 unsigned long flags, delay;
2636
c4028958
DH
2637 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2638 timed_work.work);
1da177e4
LT
2639 mad_send_wc.vendor_err = 0;
2640
2641 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2642 while (!list_empty(&mad_agent_priv->wait_list)) {
2643 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2644 struct ib_mad_send_wr_private,
2645 agent_list);
2646
2647 if (time_after(mad_send_wr->timeout, jiffies)) {
2648 delay = mad_send_wr->timeout - jiffies;
2649 if ((long)delay <= 0)
2650 delay = 1;
2651 queue_delayed_work(mad_agent_priv->qp_info->
2652 port_priv->wq,
2653 &mad_agent_priv->timed_work, delay);
2654 break;
2655 }
2656
dbf9227b 2657 list_del(&mad_send_wr->agent_list);
29bb33dd
HR
2658 if (mad_send_wr->status == IB_WC_SUCCESS &&
2659 !retry_send(mad_send_wr))
f75b7a52
HR
2660 continue;
2661
1da177e4
LT
2662 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2663
03b61ad2
HR
2664 if (mad_send_wr->status == IB_WC_SUCCESS)
2665 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2666 else
2667 mad_send_wc.status = mad_send_wr->status;
34816ad9 2668 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1da177e4
LT
2669 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2670 &mad_send_wc);
2671
1da177e4
LT
2672 atomic_dec(&mad_agent_priv->refcount);
2673 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2674 }
2675 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2676}
2677
5dd2ce12 2678static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
1da177e4
LT
2679{
2680 struct ib_mad_port_private *port_priv = cq->cq_context;
dc05980d 2681 unsigned long flags;
1da177e4 2682
dc05980d
MT
2683 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2684 if (!list_empty(&port_priv->port_list))
2685 queue_work(port_priv->wq, &port_priv->work);
2686 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
1da177e4
LT
2687}
2688
2689/*
2690 * Allocate receive MADs and post receive WRs for them
2691 */
2692static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2693 struct ib_mad_private *mad)
2694{
2695 unsigned long flags;
2696 int post, ret;
2697 struct ib_mad_private *mad_priv;
2698 struct ib_sge sg_list;
2699 struct ib_recv_wr recv_wr, *bad_recv_wr;
2700 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2701
2702 /* Initialize common scatter list fields */
1da177e4
LT
2703 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2704
2705 /* Initialize common receive WR fields */
2706 recv_wr.next = NULL;
2707 recv_wr.sg_list = &sg_list;
2708 recv_wr.num_sge = 1;
2709
2710 do {
2711 /* Allocate and map receive buffer */
2712 if (mad) {
2713 mad_priv = mad;
2714 mad = NULL;
2715 } else {
c9082e51
IW
2716 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2717 GFP_ATOMIC);
1da177e4 2718 if (!mad_priv) {
7ef5d4b0
IW
2719 dev_err(&qp_info->port_priv->device->dev,
2720 "No memory for receive buffer\n");
1da177e4
LT
2721 ret = -ENOMEM;
2722 break;
2723 }
2724 }
c9082e51 2725 sg_list.length = mad_priv_dma_size(mad_priv);
1527106f
RC
2726 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2727 &mad_priv->grh,
c9082e51 2728 mad_priv_dma_size(mad_priv),
1527106f 2729 DMA_FROM_DEVICE);
2c34e68f
YB
2730 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2731 sg_list.addr))) {
2732 ret = -ENOMEM;
2733 break;
2734 }
1527106f 2735 mad_priv->header.mapping = sg_list.addr;
1da177e4
LT
2736 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2737 mad_priv->header.mad_list.mad_queue = recv_queue;
2738
2739 /* Post receive WR */
2740 spin_lock_irqsave(&recv_queue->lock, flags);
2741 post = (++recv_queue->count < recv_queue->max_active);
2742 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2743 spin_unlock_irqrestore(&recv_queue->lock, flags);
2744 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2745 if (ret) {
2746 spin_lock_irqsave(&recv_queue->lock, flags);
2747 list_del(&mad_priv->header.mad_list.list);
2748 recv_queue->count--;
2749 spin_unlock_irqrestore(&recv_queue->lock, flags);
1527106f
RC
2750 ib_dma_unmap_single(qp_info->port_priv->device,
2751 mad_priv->header.mapping,
c9082e51 2752 mad_priv_dma_size(mad_priv),
1527106f 2753 DMA_FROM_DEVICE);
c9082e51 2754 kfree(mad_priv);
7ef5d4b0
IW
2755 dev_err(&qp_info->port_priv->device->dev,
2756 "ib_post_recv failed: %d\n", ret);
1da177e4
LT
2757 break;
2758 }
2759 } while (post);
2760
2761 return ret;
2762}
2763
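/*
 * Note on the loop above: receive buffers are allocated with GFP_ATOMIC,
 * DMA-mapped and posted until the receive queue holds max_active work
 * requests; a buffer handed in by the caller (a recycled response) is
 * consumed first, before any new allocation.
 */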
2764/*
2765 * Return all the posted receive MADs
2766 */
2767static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2768{
2769 struct ib_mad_private_header *mad_priv_hdr;
2770 struct ib_mad_private *recv;
2771 struct ib_mad_list_head *mad_list;
2772
fac70d51
EC
2773 if (!qp_info->qp)
2774 return;
2775
1da177e4
LT
2776 while (!list_empty(&qp_info->recv_queue.list)) {
2777
2778 mad_list = list_entry(qp_info->recv_queue.list.next,
2779 struct ib_mad_list_head, list);
2780 mad_priv_hdr = container_of(mad_list,
2781 struct ib_mad_private_header,
2782 mad_list);
2783 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2784 header);
2785
2786 /* Remove from posted receive MAD list */
2787 list_del(&mad_list->list);
2788
1527106f
RC
2789 ib_dma_unmap_single(qp_info->port_priv->device,
2790 recv->header.mapping,
c9082e51 2791 mad_priv_dma_size(recv),
1527106f 2792 DMA_FROM_DEVICE);
c9082e51 2793 kfree(recv);
1da177e4
LT
2794 }
2795
2796 qp_info->recv_queue.count = 0;
2797}
2798
2799/*
2800 * Start the port
2801 */
2802static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2803{
2804 int ret, i;
2805 struct ib_qp_attr *attr;
2806 struct ib_qp *qp;
ef5ed416 2807 u16 pkey_index;
1da177e4
LT
2808
2809 attr = kmalloc(sizeof *attr, GFP_KERNEL);
3cd96564 2810 if (!attr) {
7ef5d4b0
IW
2811 dev_err(&port_priv->device->dev,
2812 "Couldn't kmalloc ib_qp_attr\n");
1da177e4
LT
2813 return -ENOMEM;
2814 }
2815
ef5ed416
JM
2816 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2817 IB_DEFAULT_PKEY_FULL, &pkey_index);
2818 if (ret)
2819 pkey_index = 0;
2820
1da177e4
LT
2821 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2822 qp = port_priv->qp_info[i].qp;
fac70d51
EC
2823 if (!qp)
2824 continue;
2825
1da177e4
LT
2826 /*
2827 * PKey index for QP1 is irrelevant but
2828 * one is needed for the Reset to Init transition
2829 */
2830 attr->qp_state = IB_QPS_INIT;
ef5ed416 2831 attr->pkey_index = pkey_index;
1da177e4
LT
2832 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2833 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2834 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2835 if (ret) {
7ef5d4b0
IW
2836 dev_err(&port_priv->device->dev,
2837 "Couldn't change QP%d state to INIT: %d\n",
2838 i, ret);
1da177e4
LT
2839 goto out;
2840 }
2841
2842 attr->qp_state = IB_QPS_RTR;
2843 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2844 if (ret) {
7ef5d4b0
IW
2845 dev_err(&port_priv->device->dev,
2846 "Couldn't change QP%d state to RTR: %d\n",
2847 i, ret);
1da177e4
LT
2848 goto out;
2849 }
2850
2851 attr->qp_state = IB_QPS_RTS;
2852 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2853 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2854 if (ret) {
7ef5d4b0
IW
2855 dev_err(&port_priv->device->dev,
2856 "Couldn't change QP%d state to RTS: %d\n",
2857 i, ret);
1da177e4
LT
2858 goto out;
2859 }
2860 }
2861
2862 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2863 if (ret) {
7ef5d4b0
IW
2864 dev_err(&port_priv->device->dev,
2865 "Failed to request completion notification: %d\n",
2866 ret);
1da177e4
LT
2867 goto out;
2868 }
2869
2870 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
fac70d51
EC
2871 if (!port_priv->qp_info[i].qp)
2872 continue;
2873
1da177e4
LT
2874 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2875 if (ret) {
7ef5d4b0
IW
2876 dev_err(&port_priv->device->dev,
2877 "Couldn't post receive WRs\n");
1da177e4
LT
2878 goto out;
2879 }
2880 }
2881out:
2882 kfree(attr);
2883 return ret;
2884}
2885
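/*
 * Bring-up summary for the loop above: each special QP is walked through
 * INIT -> RTR -> RTS.  Because QP0/QP1 are management QPs, only the state,
 * P_Key index and Q_Key are set for INIT and only the send PSN for RTS; no
 * address vector or destination QPN is programmed, unlike a normal
 * connected QP.
 */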
2886static void qp_event_handler(struct ib_event *event, void *qp_context)
2887{
2888 struct ib_mad_qp_info *qp_info = qp_context;
2889
2890 /* It's worse than that! He's dead, Jim! */
7ef5d4b0
IW
2891 dev_err(&qp_info->port_priv->device->dev,
2892 "Fatal error (%d) on MAD QP (%d)\n",
1da177e4
LT
2893 event->event, qp_info->qp->qp_num);
2894}
2895
2896static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2897 struct ib_mad_queue *mad_queue)
2898{
2899 mad_queue->qp_info = qp_info;
2900 mad_queue->count = 0;
2901 spin_lock_init(&mad_queue->lock);
2902 INIT_LIST_HEAD(&mad_queue->list);
2903}
2904
2905static void init_mad_qp(struct ib_mad_port_private *port_priv,
2906 struct ib_mad_qp_info *qp_info)
2907{
2908 qp_info->port_priv = port_priv;
2909 init_mad_queue(qp_info, &qp_info->send_queue);
2910 init_mad_queue(qp_info, &qp_info->recv_queue);
2911 INIT_LIST_HEAD(&qp_info->overflow_list);
2912 spin_lock_init(&qp_info->snoop_lock);
2913 qp_info->snoop_table = NULL;
2914 qp_info->snoop_table_size = 0;
2915 atomic_set(&qp_info->snoop_count, 0);
2916}
2917
2918static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2919 enum ib_qp_type qp_type)
2920{
2921 struct ib_qp_init_attr qp_init_attr;
2922 int ret;
2923
2924 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2925 qp_init_attr.send_cq = qp_info->port_priv->cq;
2926 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2927 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
b76aabc3
HR
2928 qp_init_attr.cap.max_send_wr = mad_sendq_size;
2929 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
1da177e4
LT
2930 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2931 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2932 qp_init_attr.qp_type = qp_type;
2933 qp_init_attr.port_num = qp_info->port_priv->port_num;
2934 qp_init_attr.qp_context = qp_info;
2935 qp_init_attr.event_handler = qp_event_handler;
2936 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2937 if (IS_ERR(qp_info->qp)) {
7ef5d4b0
IW
2938 dev_err(&qp_info->port_priv->device->dev,
2939 "Couldn't create ib_mad QP%d\n",
2940 get_spl_qp_index(qp_type));
1da177e4
LT
2941 ret = PTR_ERR(qp_info->qp);
2942 goto error;
2943 }
2944 /* Use minimum queue sizes unless the CQ is resized */
b76aabc3
HR
2945 qp_info->send_queue.max_active = mad_sendq_size;
2946 qp_info->recv_queue.max_active = mad_recvq_size;
1da177e4
LT
2947 return 0;
2948
2949error:
2950 return ret;
2951}
2952
2953static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2954{
fac70d51
EC
2955 if (!qp_info->qp)
2956 return;
2957
1da177e4 2958 ib_destroy_qp(qp_info->qp);
6044ec88 2959 kfree(qp_info->snoop_table);
1da177e4
LT
2960}
2961
2962/*
2963 * Open the port
2964 * Create the QP, PD, MR, and CQ if needed
2965 */
2966static int ib_mad_port_open(struct ib_device *device,
2967 int port_num)
2968{
2969 int ret, cq_size;
2970 struct ib_mad_port_private *port_priv;
2971 unsigned long flags;
2972 char name[sizeof "ib_mad123"];
fac70d51 2973 int has_smi;
8e37210b 2974 struct ib_cq_init_attr cq_attr = {};
1da177e4 2975
337877a4
IW
2976 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
2977 return -EFAULT;
2978
1da177e4 2979 /* Create new device info */
de6eb66b 2980 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
1da177e4 2981 if (!port_priv) {
7ef5d4b0 2982 dev_err(&device->dev, "No memory for ib_mad_port_private\n");
1da177e4
LT
2983 return -ENOMEM;
2984 }
de6eb66b 2985
1da177e4
LT
2986 port_priv->device = device;
2987 port_priv->port_num = port_num;
2988 spin_lock_init(&port_priv->reg_lock);
2989 INIT_LIST_HEAD(&port_priv->agent_list);
2990 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2991 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2992
fac70d51 2993 cq_size = mad_sendq_size + mad_recvq_size;
29541e3a 2994 has_smi = rdma_cap_ib_smi(device, port_num);
fac70d51
EC
2995 if (has_smi)
2996 cq_size *= 2;
2997
8e37210b 2998 cq_attr.cqe = cq_size;
1da177e4 2999 port_priv->cq = ib_create_cq(port_priv->device,
5dd2ce12 3000 ib_mad_thread_completion_handler,
8e37210b 3001 NULL, port_priv, &cq_attr);
1da177e4 3002 if (IS_ERR(port_priv->cq)) {
7ef5d4b0 3003 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
1da177e4
LT
3004 ret = PTR_ERR(port_priv->cq);
3005 goto error3;
3006 }
3007
3008 port_priv->pd = ib_alloc_pd(device);
3009 if (IS_ERR(port_priv->pd)) {
7ef5d4b0 3010 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
1da177e4
LT
3011 ret = PTR_ERR(port_priv->pd);
3012 goto error4;
3013 }
3014
3015 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
3016 if (IS_ERR(port_priv->mr)) {
7ef5d4b0 3017 dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
1da177e4
LT
3018 ret = PTR_ERR(port_priv->mr);
3019 goto error5;
3020 }
3021
fac70d51
EC
3022 if (has_smi) {
3023 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3024 if (ret)
3025 goto error6;
3026 }
1da177e4
LT
3027 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3028 if (ret)
3029 goto error7;
3030
3031 snprintf(name, sizeof name, "ib_mad%d", port_num);
3032 port_priv->wq = create_singlethread_workqueue(name);
3033 if (!port_priv->wq) {
3034 ret = -ENOMEM;
3035 goto error8;
3036 }
c4028958 3037 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
1da177e4 3038
dc05980d
MT
3039 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3040 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3041 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3042
1da177e4
LT
3043 ret = ib_mad_port_start(port_priv);
3044 if (ret) {
7ef5d4b0 3045 dev_err(&device->dev, "Couldn't start port\n");
1da177e4
LT
3046 goto error9;
3047 }
3048
1da177e4
LT
3049 return 0;
3050
3051error9:
dc05980d
MT
3052 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3053 list_del_init(&port_priv->port_list);
3054 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3055
1da177e4
LT
3056 destroy_workqueue(port_priv->wq);
3057error8:
3058 destroy_mad_qp(&port_priv->qp_info[1]);
3059error7:
3060 destroy_mad_qp(&port_priv->qp_info[0]);
3061error6:
3062 ib_dereg_mr(port_priv->mr);
3063error5:
3064 ib_dealloc_pd(port_priv->pd);
3065error4:
3066 ib_destroy_cq(port_priv->cq);
3067 cleanup_recv_queue(&port_priv->qp_info[1]);
3068 cleanup_recv_queue(&port_priv->qp_info[0]);
3069error3:
3070 kfree(port_priv);
3071
3072 return ret;
3073}
3074
3075/*
3076 * Close the port
3077 * If there are no classes using the port, free the port
3078 * resources (CQ, MR, PD, QP) and remove the port's info structure
3079 */
3080static int ib_mad_port_close(struct ib_device *device, int port_num)
3081{
3082 struct ib_mad_port_private *port_priv;
3083 unsigned long flags;
3084
3085 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3086 port_priv = __ib_get_mad_port(device, port_num);
3087 if (port_priv == NULL) {
3088 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
7ef5d4b0 3089 dev_err(&device->dev, "Port %d not found\n", port_num);
1da177e4
LT
3090 return -ENODEV;
3091 }
dc05980d 3092 list_del_init(&port_priv->port_list);
1da177e4
LT
3093 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3094
1da177e4
LT
3095 destroy_workqueue(port_priv->wq);
3096 destroy_mad_qp(&port_priv->qp_info[1]);
3097 destroy_mad_qp(&port_priv->qp_info[0]);
3098 ib_dereg_mr(port_priv->mr);
3099 ib_dealloc_pd(port_priv->pd);
3100 ib_destroy_cq(port_priv->cq);
3101 cleanup_recv_queue(&port_priv->qp_info[1]);
3102 cleanup_recv_queue(&port_priv->qp_info[0]);
3103 /* XXX: Handle deallocation of MAD registration tables */
3104
3105 kfree(port_priv);
3106
3107 return 0;
3108}
3109
3110static void ib_mad_init_device(struct ib_device *device)
3111{
4ab6fb7e 3112 int start, end, i;
1da177e4 3113
07ebafba 3114 if (device->node_type == RDMA_NODE_IB_SWITCH) {
4ab6fb7e
RD
3115 start = 0;
3116 end = 0;
1da177e4 3117 } else {
4ab6fb7e
RD
3118 start = 1;
3119 end = device->phys_port_cnt;
1da177e4 3120 }
4ab6fb7e
RD
3121
3122 for (i = start; i <= end; i++) {
c757dea8 3123 if (!rdma_cap_ib_mad(device, i))
827f2a8b
MW
3124 continue;
3125
4ab6fb7e 3126 if (ib_mad_port_open(device, i)) {
7ef5d4b0 3127 dev_err(&device->dev, "Couldn't open port %d\n", i);
4ab6fb7e 3128 goto error;
1da177e4 3129 }
4ab6fb7e 3130 if (ib_agent_port_open(device, i)) {
7ef5d4b0
IW
3131 dev_err(&device->dev,
3132 "Couldn't open port %d for agents\n", i);
4ab6fb7e 3133 goto error_agent;
1da177e4
LT
3134 }
3135 }
f68bcc2d 3136 return;
1da177e4 3137
4ab6fb7e
RD
3138error_agent:
3139 if (ib_mad_port_close(device, i))
7ef5d4b0 3140 dev_err(&device->dev, "Couldn't close port %d\n", i);
4ab6fb7e
RD
3141
3142error:
827f2a8b 3143 while (--i >= start) {
c757dea8 3144 if (!rdma_cap_ib_mad(device, i))
827f2a8b 3145 continue;
4ab6fb7e 3146
4ab6fb7e 3147 if (ib_agent_port_close(device, i))
7ef5d4b0
IW
3148 dev_err(&device->dev,
3149 "Couldn't close port %d for agents\n", i);
4ab6fb7e 3150 if (ib_mad_port_close(device, i))
7ef5d4b0 3151 dev_err(&device->dev, "Couldn't close port %d\n", i);
1da177e4 3152 }
1da177e4
LT
3153}
3154
3155static void ib_mad_remove_device(struct ib_device *device)
3156{
827f2a8b 3157 int start, end, i;
070e140c 3158
07ebafba 3159 if (device->node_type == RDMA_NODE_IB_SWITCH) {
827f2a8b
MW
3160 start = 0;
3161 end = 0;
1da177e4 3162 } else {
827f2a8b
MW
3163 start = 1;
3164 end = device->phys_port_cnt;
1da177e4 3165 }
827f2a8b
MW
3166
3167 for (i = start; i <= end; i++) {
c757dea8 3168 if (!rdma_cap_ib_mad(device, i))
827f2a8b
MW
3169 continue;
3170
3171 if (ib_agent_port_close(device, i))
7ef5d4b0 3172 dev_err(&device->dev,
827f2a8b
MW
3173 "Couldn't close port %d for agents\n", i);
3174 if (ib_mad_port_close(device, i))
3175 dev_err(&device->dev, "Couldn't close port %d\n", i);
1da177e4
LT
3176 }
3177}
3178
3179static struct ib_client mad_client = {
3180 .name = "mad",
3181 .add = ib_mad_init_device,
3182 .remove = ib_mad_remove_device
3183};
3184
3185static int __init ib_mad_init_module(void)
3186{
b76aabc3
HR
3187 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3188 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3189
3190 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3191 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3192
1da177e4
LT
3193 INIT_LIST_HEAD(&ib_mad_port_list);
3194
3195 if (ib_register_client(&mad_client)) {
7ef5d4b0 3196 pr_err("Couldn't register ib_mad client\n");
c9082e51 3197 return -EINVAL;
1da177e4
LT
3198 }
3199
3200 return 0;
1da177e4
LT
3201}
3202
3203static void __exit ib_mad_cleanup_module(void)
3204{
3205 ib_unregister_client(&mad_client);
1da177e4
LT
3206}
3207
3208module_init(ib_mad_init_module);
3209module_exit(ib_mad_cleanup_module);