1 /*
2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
35 */
36
37 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/module.h>
42 #include <rdma/ib_cache.h>
43
44 #include "mad_priv.h"
45 #include "mad_rmpp.h"
46 #include "smi.h"
47 #include "agent.h"
48
49 MODULE_LICENSE("Dual BSD/GPL");
50 MODULE_DESCRIPTION("kernel IB MAD API");
51 MODULE_AUTHOR("Hal Rosenstock");
52 MODULE_AUTHOR("Sean Hefty");
53
54 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
55 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
56
57 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
58 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
59 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
60 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
61
62 static struct list_head ib_mad_port_list;
63 static u32 ib_mad_client_id = 0;
64
65 /* Port list lock */
66 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
67
68 /* Forward declarations */
69 static int method_in_use(struct ib_mad_mgmt_method_table **method,
70 struct ib_mad_reg_req *mad_reg_req);
71 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
72 static struct ib_mad_agent_private *find_mad_agent(
73 struct ib_mad_port_private *port_priv,
74 const struct ib_mad_hdr *mad);
75 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
76 struct ib_mad_private *mad);
77 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
78 static void timeout_sends(struct work_struct *work);
79 static void local_completions(struct work_struct *work);
80 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
81 struct ib_mad_agent_private *agent_priv,
82 u8 mgmt_class);
83 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
84 struct ib_mad_agent_private *agent_priv);
85
86 /*
87  * Returns an ib_mad_port_private structure or NULL for a device/port
88  * Assumes ib_mad_port_list_lock is held
89 */
90 static inline struct ib_mad_port_private *
91 __ib_get_mad_port(struct ib_device *device, int port_num)
92 {
93 struct ib_mad_port_private *entry;
94
95 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
96 if (entry->device == device && entry->port_num == port_num)
97 return entry;
98 }
99 return NULL;
100 }
101
102 /*
103  * Wrapper function to return an ib_mad_port_private structure or NULL
104 * for a device/port
105 */
106 static inline struct ib_mad_port_private *
107 ib_get_mad_port(struct ib_device *device, int port_num)
108 {
109 struct ib_mad_port_private *entry;
110 unsigned long flags;
111
112 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
113 entry = __ib_get_mad_port(device, port_num);
114 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
115
116 return entry;
117 }
118
119 static inline u8 convert_mgmt_class(u8 mgmt_class)
120 {
121 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
122 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
123 0 : mgmt_class;
124 }
125
126 static int get_spl_qp_index(enum ib_qp_type qp_type)
127 {
128 switch (qp_type)
129 {
130 case IB_QPT_SMI:
131 return 0;
132 case IB_QPT_GSI:
133 return 1;
134 default:
135 return -1;
136 }
137 }
138
139 static int vendor_class_index(u8 mgmt_class)
140 {
141 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
142 }
143
144 static int is_vendor_class(u8 mgmt_class)
145 {
146 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
147 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
148 return 0;
149 return 1;
150 }
151
152 static int is_vendor_oui(char *oui)
153 {
154 if (oui[0] || oui[1] || oui[2])
155 return 1;
156 return 0;
157 }
158
159 static int is_vendor_method_in_use(
160 struct ib_mad_mgmt_vendor_class *vendor_class,
161 struct ib_mad_reg_req *mad_reg_req)
162 {
163 struct ib_mad_mgmt_method_table *method;
164 int i;
165
166 for (i = 0; i < MAX_MGMT_OUI; i++) {
167 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
168 method = vendor_class->method_table[i];
169 if (method) {
170 if (method_in_use(&method, mad_reg_req))
171 return 1;
172 else
173 break;
174 }
175 }
176 }
177 return 0;
178 }
179
180 int ib_response_mad(const struct ib_mad_hdr *hdr)
181 {
182 return ((hdr->method & IB_MGMT_METHOD_RESP) ||
183 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
184 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
185 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
186 }
187 EXPORT_SYMBOL(ib_response_mad);
188
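/*
 * For example, GetResp (0x81) and ReportResp (0x86) carry the
 * IB_MGMT_METHOD_RESP bit (0x80) and match the first test above, while
 * TrapRepress (0x07) does not and needs the explicit second test.  BM MADs
 * are different again: their response direction is signalled by a bit in
 * attr_mod rather than in the method, hence the third test.
 */
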
189 /*
190 * ib_register_mad_agent - Register to send/receive MADs
191 */
192 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
193 u8 port_num,
194 enum ib_qp_type qp_type,
195 struct ib_mad_reg_req *mad_reg_req,
196 u8 rmpp_version,
197 ib_mad_send_handler send_handler,
198 ib_mad_recv_handler recv_handler,
199 void *context,
200 u32 registration_flags)
201 {
202 struct ib_mad_port_private *port_priv;
203 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
204 struct ib_mad_agent_private *mad_agent_priv;
205 struct ib_mad_reg_req *reg_req = NULL;
206 struct ib_mad_mgmt_class_table *class;
207 struct ib_mad_mgmt_vendor_class_table *vendor;
208 struct ib_mad_mgmt_vendor_class *vendor_class;
209 struct ib_mad_mgmt_method_table *method;
210 int ret2, qpn;
211 unsigned long flags;
212 u8 mgmt_class, vclass;
213
214 /* Validate parameters */
215 qpn = get_spl_qp_index(qp_type);
216 if (qpn == -1) {
217 dev_notice(&device->dev,
218 "ib_register_mad_agent: invalid QP Type %d\n",
219 qp_type);
220 goto error1;
221 }
222
223 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
224 dev_notice(&device->dev,
225 "ib_register_mad_agent: invalid RMPP Version %u\n",
226 rmpp_version);
227 goto error1;
228 }
229
230 /* Validate MAD registration request if supplied */
231 if (mad_reg_req) {
232 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
233 dev_notice(&device->dev,
234 "ib_register_mad_agent: invalid Class Version %u\n",
235 mad_reg_req->mgmt_class_version);
236 goto error1;
237 }
238 if (!recv_handler) {
239 dev_notice(&device->dev,
240 "ib_register_mad_agent: no recv_handler\n");
241 goto error1;
242 }
243 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
244 /*
245 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
246 * one in this range currently allowed
247 */
248 if (mad_reg_req->mgmt_class !=
249 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
250 dev_notice(&device->dev,
251 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
252 mad_reg_req->mgmt_class);
253 goto error1;
254 }
255 } else if (mad_reg_req->mgmt_class == 0) {
256 /*
257 * Class 0 is reserved in IBA and is used for
258 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
259 */
260 dev_notice(&device->dev,
261 "ib_register_mad_agent: Invalid Mgmt Class 0\n");
262 goto error1;
263 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
264 /*
265 * If class is in "new" vendor range,
266 * ensure supplied OUI is not zero
267 */
268 if (!is_vendor_oui(mad_reg_req->oui)) {
269 dev_notice(&device->dev,
270 "ib_register_mad_agent: No OUI specified for class 0x%x\n",
271 mad_reg_req->mgmt_class);
272 goto error1;
273 }
274 }
275 /* Make sure class supplied is consistent with RMPP */
276 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
277 if (rmpp_version) {
278 dev_notice(&device->dev,
279 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
280 mad_reg_req->mgmt_class);
281 goto error1;
282 }
283 }
284
285 /* Make sure class supplied is consistent with QP type */
286 if (qp_type == IB_QPT_SMI) {
287 if ((mad_reg_req->mgmt_class !=
288 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
289 (mad_reg_req->mgmt_class !=
290 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
291 dev_notice(&device->dev,
292 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
293 mad_reg_req->mgmt_class);
294 goto error1;
295 }
296 } else {
297 if ((mad_reg_req->mgmt_class ==
298 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
299 (mad_reg_req->mgmt_class ==
300 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
301 dev_notice(&device->dev,
302 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
303 mad_reg_req->mgmt_class);
304 goto error1;
305 }
306 }
307 } else {
308 /* No registration request supplied */
309 if (!send_handler)
310 goto error1;
311 if (registration_flags & IB_MAD_USER_RMPP)
312 goto error1;
313 }
314
315 /* Validate device and port */
316 port_priv = ib_get_mad_port(device, port_num);
317 if (!port_priv) {
318 dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
319 ret = ERR_PTR(-ENODEV);
320 goto error1;
321 }
322
323 /* Verify the QP requested is supported. For example, Ethernet devices
324 * will not have QP0 */
325 if (!port_priv->qp_info[qpn].qp) {
326 dev_notice(&device->dev,
327 "ib_register_mad_agent: QP %d not supported\n", qpn);
328 ret = ERR_PTR(-EPROTONOSUPPORT);
329 goto error1;
330 }
331
332 /* Allocate structures */
333 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
334 if (!mad_agent_priv) {
335 ret = ERR_PTR(-ENOMEM);
336 goto error1;
337 }
338
339 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
340 IB_ACCESS_LOCAL_WRITE);
341 if (IS_ERR(mad_agent_priv->agent.mr)) {
342 ret = ERR_PTR(-ENOMEM);
343 goto error2;
344 }
345
346 if (mad_reg_req) {
347 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
348 if (!reg_req) {
349 ret = ERR_PTR(-ENOMEM);
350 goto error3;
351 }
352 }
353
354 /* Now, fill in the various structures */
355 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
356 mad_agent_priv->reg_req = reg_req;
357 mad_agent_priv->agent.rmpp_version = rmpp_version;
358 mad_agent_priv->agent.device = device;
359 mad_agent_priv->agent.recv_handler = recv_handler;
360 mad_agent_priv->agent.send_handler = send_handler;
361 mad_agent_priv->agent.context = context;
362 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
363 mad_agent_priv->agent.port_num = port_num;
364 mad_agent_priv->agent.flags = registration_flags;
365 spin_lock_init(&mad_agent_priv->lock);
366 INIT_LIST_HEAD(&mad_agent_priv->send_list);
367 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
368 INIT_LIST_HEAD(&mad_agent_priv->done_list);
369 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
370 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
371 INIT_LIST_HEAD(&mad_agent_priv->local_list);
372 INIT_WORK(&mad_agent_priv->local_work, local_completions);
373 atomic_set(&mad_agent_priv->refcount, 1);
374 init_completion(&mad_agent_priv->comp);
375
376 spin_lock_irqsave(&port_priv->reg_lock, flags);
377 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
378
379 /*
380 * Make sure MAD registration (if supplied)
381 * is non overlapping with any existing ones
382 */
383 if (mad_reg_req) {
384 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
385 if (!is_vendor_class(mgmt_class)) {
386 class = port_priv->version[mad_reg_req->
387 mgmt_class_version].class;
388 if (class) {
389 method = class->method_table[mgmt_class];
390 if (method) {
391 if (method_in_use(&method,
392 mad_reg_req))
393 goto error4;
394 }
395 }
396 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
397 mgmt_class);
398 } else {
399 /* "New" vendor class range */
400 vendor = port_priv->version[mad_reg_req->
401 mgmt_class_version].vendor;
402 if (vendor) {
403 vclass = vendor_class_index(mgmt_class);
404 vendor_class = vendor->vendor_class[vclass];
405 if (vendor_class) {
406 if (is_vendor_method_in_use(
407 vendor_class,
408 mad_reg_req))
409 goto error4;
410 }
411 }
412 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
413 }
414 if (ret2) {
415 ret = ERR_PTR(ret2);
416 goto error4;
417 }
418 }
419
420 /* Add mad agent into port's agent list */
421 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
422 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
423
424 return &mad_agent_priv->agent;
425
426 error4:
427 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
428 kfree(reg_req);
429 error3:
430 ib_dereg_mr(mad_agent_priv->agent.mr);
431 error2:
432 kfree(mad_agent_priv);
433 error1:
434 return ret;
435 }
436 EXPORT_SYMBOL(ib_register_mad_agent);
437
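/*
 * Illustrative sketch only (not part of this file's logic): a minimal client
 * registration against the API above.  The example_* names are placeholders.
 * Passing a NULL ib_mad_reg_req, as here, registers a pure requester that
 * only receives the responses to MADs it sends (routed back by the high
 * 32 bits of the TID); supplying a filled-in reg_req would additionally
 * deliver unsolicited MADs for the requested class and methods.
 */
static void example_send_done(struct ib_mad_agent *agent,
			      struct ib_mad_send_wc *send_wc)
{
	/* Release the send buffer once the send completes or times out */
	ib_free_send_mad(send_wc->send_buf);
}

static void example_recv_done(struct ib_mad_agent *agent,
			      struct ib_mad_recv_wc *recv_wc)
{
	/* Consume the response, then hand the receive buffers back */
	ib_free_recv_mad(recv_wc);
}

static struct ib_mad_agent *example_register(struct ib_device *device,
					     u8 port_num)
{
	return ib_register_mad_agent(device, port_num, IB_QPT_GSI,
				     NULL, 0, example_send_done,
				     example_recv_done, NULL, 0);
}
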
438 static inline int is_snooping_sends(int mad_snoop_flags)
439 {
440 return (mad_snoop_flags &
441 (/*IB_MAD_SNOOP_POSTED_SENDS |
442 IB_MAD_SNOOP_RMPP_SENDS |*/
443 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
444 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
445 }
446
447 static inline int is_snooping_recvs(int mad_snoop_flags)
448 {
449 return (mad_snoop_flags &
450 (IB_MAD_SNOOP_RECVS /*|
451 IB_MAD_SNOOP_RMPP_RECVS*/));
452 }
453
454 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
455 struct ib_mad_snoop_private *mad_snoop_priv)
456 {
457 struct ib_mad_snoop_private **new_snoop_table;
458 unsigned long flags;
459 int i;
460
461 spin_lock_irqsave(&qp_info->snoop_lock, flags);
462 /* Check for empty slot in array. */
463 for (i = 0; i < qp_info->snoop_table_size; i++)
464 if (!qp_info->snoop_table[i])
465 break;
466
467 if (i == qp_info->snoop_table_size) {
468 /* Grow table. */
469 new_snoop_table = krealloc(qp_info->snoop_table,
470 sizeof mad_snoop_priv *
471 (qp_info->snoop_table_size + 1),
472 GFP_ATOMIC);
473 if (!new_snoop_table) {
474 i = -ENOMEM;
475 goto out;
476 }
477
478 qp_info->snoop_table = new_snoop_table;
479 qp_info->snoop_table_size++;
480 }
481 qp_info->snoop_table[i] = mad_snoop_priv;
482 atomic_inc(&qp_info->snoop_count);
483 out:
484 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
485 return i;
486 }
487
488 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
489 u8 port_num,
490 enum ib_qp_type qp_type,
491 int mad_snoop_flags,
492 ib_mad_snoop_handler snoop_handler,
493 ib_mad_recv_handler recv_handler,
494 void *context)
495 {
496 struct ib_mad_port_private *port_priv;
497 struct ib_mad_agent *ret;
498 struct ib_mad_snoop_private *mad_snoop_priv;
499 int qpn;
500
501 /* Validate parameters */
502 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
503 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
504 ret = ERR_PTR(-EINVAL);
505 goto error1;
506 }
507 qpn = get_spl_qp_index(qp_type);
508 if (qpn == -1) {
509 ret = ERR_PTR(-EINVAL);
510 goto error1;
511 }
512 port_priv = ib_get_mad_port(device, port_num);
513 if (!port_priv) {
514 ret = ERR_PTR(-ENODEV);
515 goto error1;
516 }
517 /* Allocate structures */
518 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
519 if (!mad_snoop_priv) {
520 ret = ERR_PTR(-ENOMEM);
521 goto error1;
522 }
523
524 /* Now, fill in the various structures */
525 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
526 mad_snoop_priv->agent.device = device;
527 mad_snoop_priv->agent.recv_handler = recv_handler;
528 mad_snoop_priv->agent.snoop_handler = snoop_handler;
529 mad_snoop_priv->agent.context = context;
530 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
531 mad_snoop_priv->agent.port_num = port_num;
532 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
533 init_completion(&mad_snoop_priv->comp);
534 mad_snoop_priv->snoop_index = register_snoop_agent(
535 &port_priv->qp_info[qpn],
536 mad_snoop_priv);
537 if (mad_snoop_priv->snoop_index < 0) {
538 ret = ERR_PTR(mad_snoop_priv->snoop_index);
539 goto error2;
540 }
541
542 atomic_set(&mad_snoop_priv->refcount, 1);
543 return &mad_snoop_priv->agent;
544
545 error2:
546 kfree(mad_snoop_priv);
547 error1:
548 return ret;
549 }
550 EXPORT_SYMBOL(ib_register_mad_snoop);
551
552 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
553 {
554 if (atomic_dec_and_test(&mad_agent_priv->refcount))
555 complete(&mad_agent_priv->comp);
556 }
557
558 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
559 {
560 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
561 complete(&mad_snoop_priv->comp);
562 }
563
564 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
565 {
566 struct ib_mad_port_private *port_priv;
567 unsigned long flags;
568
569 /* Note that we could still be handling received MADs */
570
571 /*
572 * Canceling all sends results in dropping received response
573 * MADs, preventing us from queuing additional work
574 */
575 cancel_mads(mad_agent_priv);
576 port_priv = mad_agent_priv->qp_info->port_priv;
577 cancel_delayed_work(&mad_agent_priv->timed_work);
578
579 spin_lock_irqsave(&port_priv->reg_lock, flags);
580 remove_mad_reg_req(mad_agent_priv);
581 list_del(&mad_agent_priv->agent_list);
582 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
583
584 flush_workqueue(port_priv->wq);
585 ib_cancel_rmpp_recvs(mad_agent_priv);
586
587 deref_mad_agent(mad_agent_priv);
588 wait_for_completion(&mad_agent_priv->comp);
589
590 kfree(mad_agent_priv->reg_req);
591 ib_dereg_mr(mad_agent_priv->agent.mr);
592 kfree(mad_agent_priv);
593 }
594
595 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
596 {
597 struct ib_mad_qp_info *qp_info;
598 unsigned long flags;
599
600 qp_info = mad_snoop_priv->qp_info;
601 spin_lock_irqsave(&qp_info->snoop_lock, flags);
602 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
603 atomic_dec(&qp_info->snoop_count);
604 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
605
606 deref_snoop_agent(mad_snoop_priv);
607 wait_for_completion(&mad_snoop_priv->comp);
608
609 kfree(mad_snoop_priv);
610 }
611
612 /*
613 * ib_unregister_mad_agent - Unregisters a client from using MAD services
614 */
615 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
616 {
617 struct ib_mad_agent_private *mad_agent_priv;
618 struct ib_mad_snoop_private *mad_snoop_priv;
619
620 /* If the TID is zero, the agent can only snoop. */
621 if (mad_agent->hi_tid) {
622 mad_agent_priv = container_of(mad_agent,
623 struct ib_mad_agent_private,
624 agent);
625 unregister_mad_agent(mad_agent_priv);
626 } else {
627 mad_snoop_priv = container_of(mad_agent,
628 struct ib_mad_snoop_private,
629 agent);
630 unregister_mad_snoop(mad_snoop_priv);
631 }
632 return 0;
633 }
634 EXPORT_SYMBOL(ib_unregister_mad_agent);
635
636 static void dequeue_mad(struct ib_mad_list_head *mad_list)
637 {
638 struct ib_mad_queue *mad_queue;
639 unsigned long flags;
640
641 BUG_ON(!mad_list->mad_queue);
642 mad_queue = mad_list->mad_queue;
643 spin_lock_irqsave(&mad_queue->lock, flags);
644 list_del(&mad_list->list);
645 mad_queue->count--;
646 spin_unlock_irqrestore(&mad_queue->lock, flags);
647 }
648
649 static void snoop_send(struct ib_mad_qp_info *qp_info,
650 struct ib_mad_send_buf *send_buf,
651 struct ib_mad_send_wc *mad_send_wc,
652 int mad_snoop_flags)
653 {
654 struct ib_mad_snoop_private *mad_snoop_priv;
655 unsigned long flags;
656 int i;
657
658 spin_lock_irqsave(&qp_info->snoop_lock, flags);
659 for (i = 0; i < qp_info->snoop_table_size; i++) {
660 mad_snoop_priv = qp_info->snoop_table[i];
661 if (!mad_snoop_priv ||
662 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
663 continue;
664
665 atomic_inc(&mad_snoop_priv->refcount);
666 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
667 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
668 send_buf, mad_send_wc);
669 deref_snoop_agent(mad_snoop_priv);
670 spin_lock_irqsave(&qp_info->snoop_lock, flags);
671 }
672 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
673 }
674
675 static void snoop_recv(struct ib_mad_qp_info *qp_info,
676 struct ib_mad_recv_wc *mad_recv_wc,
677 int mad_snoop_flags)
678 {
679 struct ib_mad_snoop_private *mad_snoop_priv;
680 unsigned long flags;
681 int i;
682
683 spin_lock_irqsave(&qp_info->snoop_lock, flags);
684 for (i = 0; i < qp_info->snoop_table_size; i++) {
685 mad_snoop_priv = qp_info->snoop_table[i];
686 if (!mad_snoop_priv ||
687 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
688 continue;
689
690 atomic_inc(&mad_snoop_priv->refcount);
691 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
692 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
693 mad_recv_wc);
694 deref_snoop_agent(mad_snoop_priv);
695 spin_lock_irqsave(&qp_info->snoop_lock, flags);
696 }
697 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
698 }
699
700 static void build_smp_wc(struct ib_qp *qp,
701 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
702 struct ib_wc *wc)
703 {
704 memset(wc, 0, sizeof *wc);
705 wc->wr_id = wr_id;
706 wc->status = IB_WC_SUCCESS;
707 wc->opcode = IB_WC_RECV;
708 wc->pkey_index = pkey_index;
709 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
710 wc->src_qp = IB_QP0;
711 wc->qp = qp;
712 wc->slid = slid;
713 wc->sl = 0;
714 wc->dlid_path_bits = 0;
715 wc->port_num = port_num;
716 }
717
718 static size_t mad_priv_size(const struct ib_mad_private *mp)
719 {
720 return sizeof(struct ib_mad_private) + mp->mad_size;
721 }
722
723 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
724 {
725 size_t size = sizeof(struct ib_mad_private) + mad_size;
726 struct ib_mad_private *ret = kzalloc(size, flags);
727
728 if (ret)
729 ret->mad_size = mad_size;
730
731 return ret;
732 }
733
734 static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
735 {
736 return rdma_max_mad_size(port_priv->device, port_priv->port_num);
737 }
738
739 static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
740 {
741 return sizeof(struct ib_grh) + mp->mad_size;
742 }
743
744 /*
745 * Return 0 if SMP is to be sent
746 * Return 1 if SMP was consumed locally (whether or not solicited)
747 * Return < 0 if error
748 */
749 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
750 struct ib_mad_send_wr_private *mad_send_wr)
751 {
752 int ret = 0;
753 struct ib_smp *smp = mad_send_wr->send_buf.mad;
754 unsigned long flags;
755 struct ib_mad_local_private *local;
756 struct ib_mad_private *mad_priv;
757 struct ib_mad_port_private *port_priv;
758 struct ib_mad_agent_private *recv_mad_agent = NULL;
759 struct ib_device *device = mad_agent_priv->agent.device;
760 u8 port_num;
761 struct ib_wc mad_wc;
762 struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
763 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
764
765 if (device->node_type == RDMA_NODE_IB_SWITCH &&
766 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
767 port_num = send_wr->wr.ud.port_num;
768 else
769 port_num = mad_agent_priv->agent.port_num;
770
771 /*
772 * Directed route handling starts if the initial LID routed part of
773 * a request or the ending LID routed part of a response is empty.
774 * If we are at the start of the LID routed part, don't update the
775 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
776 */
777 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
778 IB_LID_PERMISSIVE &&
779 smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
780 IB_SMI_DISCARD) {
781 ret = -EINVAL;
782 dev_err(&device->dev, "Invalid directed route\n");
783 goto out;
784 }
785
786 /* Check to post send on QP or process locally */
787 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
788 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
789 goto out;
790
791 local = kmalloc(sizeof *local, GFP_ATOMIC);
792 if (!local) {
793 ret = -ENOMEM;
794 dev_err(&device->dev, "No memory for ib_mad_local_private\n");
795 goto out;
796 }
797 local->mad_priv = NULL;
798 local->recv_mad_agent = NULL;
799 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
800 if (!mad_priv) {
801 ret = -ENOMEM;
802 dev_err(&device->dev, "No memory for local response MAD\n");
803 kfree(local);
804 goto out;
805 }
806
807 build_smp_wc(mad_agent_priv->agent.qp,
808 send_wr->wr_id, be16_to_cpu(smp->dr_slid),
809 send_wr->wr.ud.pkey_index,
810 send_wr->wr.ud.port_num, &mad_wc);
811
812 /* No GRH for DR SMP */
813 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
814 (const struct ib_mad *)smp,
815 (struct ib_mad *)mad_priv->mad);
816 switch (ret)
817 {
818 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
819 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
820 mad_agent_priv->agent.recv_handler) {
821 local->mad_priv = mad_priv;
822 local->recv_mad_agent = mad_agent_priv;
823 /*
824 * Reference MAD agent until receive
825 * side of local completion handled
826 */
827 atomic_inc(&mad_agent_priv->refcount);
828 } else
829 kfree(mad_priv);
830 break;
831 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
832 kfree(mad_priv);
833 break;
834 case IB_MAD_RESULT_SUCCESS:
835 /* Treat like an incoming receive MAD */
836 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
837 mad_agent_priv->agent.port_num);
838 if (port_priv) {
839 memcpy(mad_priv->mad, smp, mad_priv->mad_size);
840 recv_mad_agent = find_mad_agent(port_priv,
841 (const struct ib_mad_hdr *)mad_priv->mad);
842 }
843 if (!port_priv || !recv_mad_agent) {
844 /*
845 * No receiving agent so drop packet and
846 * generate send completion.
847 */
848 kfree(mad_priv);
849 break;
850 }
851 local->mad_priv = mad_priv;
852 local->recv_mad_agent = recv_mad_agent;
853 break;
854 default:
855 kfree(mad_priv);
856 kfree(local);
857 ret = -EINVAL;
858 goto out;
859 }
860
861 local->mad_send_wr = mad_send_wr;
862 /* Reference MAD agent until send side of local completion handled */
863 atomic_inc(&mad_agent_priv->refcount);
864 /* Queue local completion to local list */
865 spin_lock_irqsave(&mad_agent_priv->lock, flags);
866 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
867 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
868 queue_work(mad_agent_priv->qp_info->port_priv->wq,
869 &mad_agent_priv->local_work);
870 ret = 1;
871 out:
872 return ret;
873 }
874
875 static int get_pad_size(int hdr_len, int data_len)
876 {
877 int seg_size, pad;
878
879 seg_size = sizeof(struct ib_mad) - hdr_len;
880 if (data_len && seg_size) {
881 pad = seg_size - data_len % seg_size;
882 return pad == seg_size ? 0 : pad;
883 } else
884 return seg_size;
885 }
886
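/*
 * Worked example for the padding math above: for an SA MAD, hdr_len is
 * IB_MGMT_SA_HDR (56), so seg_size = 256 - 56 = 200 data bytes per MAD.
 * A data_len of 120 then needs pad = 200 - (120 % 200) = 80 bytes so the
 * payload fills a whole segment; a data_len of exactly 200 needs none.
 */
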
887 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
888 {
889 struct ib_rmpp_segment *s, *t;
890
891 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
892 list_del(&s->list);
893 kfree(s);
894 }
895 }
896
897 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
898 gfp_t gfp_mask)
899 {
900 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
901 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
902 struct ib_rmpp_segment *seg = NULL;
903 int left, seg_size, pad;
904
905 send_buf->seg_size = sizeof(struct ib_mad) - send_buf->hdr_len;
906 seg_size = send_buf->seg_size;
907 pad = send_wr->pad;
908
909 /* Allocate data segments. */
910 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
911 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
912 if (!seg) {
913 dev_err(&send_buf->mad_agent->device->dev,
914 "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
915 sizeof (*seg) + seg_size, gfp_mask);
916 free_send_rmpp_list(send_wr);
917 return -ENOMEM;
918 }
919 seg->num = ++send_buf->seg_count;
920 list_add_tail(&seg->list, &send_wr->rmpp_list);
921 }
922
923 /* Zero any padding */
924 if (pad)
925 memset(seg->data + seg_size - pad, 0, pad);
926
927 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
928 agent.rmpp_version;
929 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
930 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
931
932 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
933 struct ib_rmpp_segment, list);
934 send_wr->last_ack_seg = send_wr->cur_seg;
935 return 0;
936 }
937
938 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
939 {
940 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
941 }
942 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
943
944 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
945 u32 remote_qpn, u16 pkey_index,
946 int rmpp_active,
947 int hdr_len, int data_len,
948 gfp_t gfp_mask,
949 u8 base_version)
950 {
951 struct ib_mad_agent_private *mad_agent_priv;
952 struct ib_mad_send_wr_private *mad_send_wr;
953 int pad, message_size, ret, size;
954 void *buf;
955
956 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
957 agent);
958 pad = get_pad_size(hdr_len, data_len);
959 message_size = hdr_len + data_len + pad;
960
961 if (ib_mad_kernel_rmpp_agent(mad_agent)) {
962 if (!rmpp_active && message_size > sizeof(struct ib_mad))
963 return ERR_PTR(-EINVAL);
964 } else
965 if (rmpp_active || message_size > sizeof(struct ib_mad))
966 return ERR_PTR(-EINVAL);
967
968 size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
969 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
970 if (!buf)
971 return ERR_PTR(-ENOMEM);
972
973 mad_send_wr = buf + size;
974 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
975 mad_send_wr->send_buf.mad = buf;
976 mad_send_wr->send_buf.hdr_len = hdr_len;
977 mad_send_wr->send_buf.data_len = data_len;
978 mad_send_wr->pad = pad;
979
980 mad_send_wr->mad_agent_priv = mad_agent_priv;
981 mad_send_wr->sg_list[0].length = hdr_len;
982 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
983 mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
984 mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
985
986 mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
987 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
988 mad_send_wr->send_wr.num_sge = 2;
989 mad_send_wr->send_wr.opcode = IB_WR_SEND;
990 mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
991 mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
992 mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
993 mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
994
995 if (rmpp_active) {
996 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
997 if (ret) {
998 kfree(buf);
999 return ERR_PTR(ret);
1000 }
1001 }
1002
1003 mad_send_wr->send_buf.mad_agent = mad_agent;
1004 atomic_inc(&mad_agent_priv->refcount);
1005 return &mad_send_wr->send_buf;
1006 }
1007 EXPORT_SYMBOL(ib_create_send_mad);
1008
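/*
 * Illustrative sketch only: allocating a single (non-RMPP) management MAD
 * with the helper above.  example_alloc_mad() is a placeholder name;
 * IB_MGMT_MAD_HDR/IB_MGMT_MAD_DATA (24/232 bytes) simply split the 256-byte
 * MAD into its common header and payload.
 */
static struct ib_mad_send_buf *example_alloc_mad(struct ib_mad_agent *agent,
						 u32 remote_qpn,
						 u16 pkey_index)
{
	struct ib_mad_send_buf *msg;
	struct ib_mad_hdr *hdr;

	msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
				 0 /* rmpp_active */,
				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		return msg;

	/* msg->mad points at the MAD; fill in the header before posting */
	hdr = msg->mad;
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
	hdr->class_version = 1;
	hdr->method = IB_MGMT_METHOD_GET;
	/* Callers also set hdr->tid (high 32 bits from agent->hi_tid)
	 * and hdr->attr_id as appropriate for the request. */
	return msg;
}
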
1009 int ib_get_mad_data_offset(u8 mgmt_class)
1010 {
1011 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1012 return IB_MGMT_SA_HDR;
1013 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1014 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1015 (mgmt_class == IB_MGMT_CLASS_BIS))
1016 return IB_MGMT_DEVICE_HDR;
1017 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1018 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1019 return IB_MGMT_VENDOR_HDR;
1020 else
1021 return IB_MGMT_MAD_HDR;
1022 }
1023 EXPORT_SYMBOL(ib_get_mad_data_offset);
1024
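/*
 * For example, SA (SubnAdm) MADs return IB_MGMT_SA_HDR (56), so their
 * payload starts 56 bytes into each 256-byte MAD and spans the remaining
 * IB_MGMT_SA_DATA (200) bytes; plain classes fall through to
 * IB_MGMT_MAD_HDR (24).
 */
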
1025 int ib_is_mad_class_rmpp(u8 mgmt_class)
1026 {
1027 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1028 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1029 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1030 (mgmt_class == IB_MGMT_CLASS_BIS) ||
1031 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1032 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1033 return 1;
1034 return 0;
1035 }
1036 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1037
1038 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1039 {
1040 struct ib_mad_send_wr_private *mad_send_wr;
1041 struct list_head *list;
1042
1043 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1044 send_buf);
1045 list = &mad_send_wr->cur_seg->list;
1046
1047 if (mad_send_wr->cur_seg->num < seg_num) {
1048 list_for_each_entry(mad_send_wr->cur_seg, list, list)
1049 if (mad_send_wr->cur_seg->num == seg_num)
1050 break;
1051 } else if (mad_send_wr->cur_seg->num > seg_num) {
1052 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1053 if (mad_send_wr->cur_seg->num == seg_num)
1054 break;
1055 }
1056 return mad_send_wr->cur_seg->data;
1057 }
1058 EXPORT_SYMBOL(ib_get_rmpp_segment);
1059
1060 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1061 {
1062 if (mad_send_wr->send_buf.seg_count)
1063 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1064 mad_send_wr->seg_num);
1065 else
1066 return mad_send_wr->send_buf.mad +
1067 mad_send_wr->send_buf.hdr_len;
1068 }
1069
1070 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1071 {
1072 struct ib_mad_agent_private *mad_agent_priv;
1073 struct ib_mad_send_wr_private *mad_send_wr;
1074
1075 mad_agent_priv = container_of(send_buf->mad_agent,
1076 struct ib_mad_agent_private, agent);
1077 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1078 send_buf);
1079
1080 free_send_rmpp_list(mad_send_wr);
1081 kfree(send_buf->mad);
1082 deref_mad_agent(mad_agent_priv);
1083 }
1084 EXPORT_SYMBOL(ib_free_send_mad);
1085
1086 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1087 {
1088 struct ib_mad_qp_info *qp_info;
1089 struct list_head *list;
1090 struct ib_send_wr *bad_send_wr;
1091 struct ib_mad_agent *mad_agent;
1092 struct ib_sge *sge;
1093 unsigned long flags;
1094 int ret;
1095
1096 /* Set WR ID to find mad_send_wr upon completion */
1097 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1098 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1099 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1100
1101 mad_agent = mad_send_wr->send_buf.mad_agent;
1102 sge = mad_send_wr->sg_list;
1103 sge[0].addr = ib_dma_map_single(mad_agent->device,
1104 mad_send_wr->send_buf.mad,
1105 sge[0].length,
1106 DMA_TO_DEVICE);
1107 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1108 return -ENOMEM;
1109
1110 mad_send_wr->header_mapping = sge[0].addr;
1111
1112 sge[1].addr = ib_dma_map_single(mad_agent->device,
1113 ib_get_payload(mad_send_wr),
1114 sge[1].length,
1115 DMA_TO_DEVICE);
1116 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1117 ib_dma_unmap_single(mad_agent->device,
1118 mad_send_wr->header_mapping,
1119 sge[0].length, DMA_TO_DEVICE);
1120 return -ENOMEM;
1121 }
1122 mad_send_wr->payload_mapping = sge[1].addr;
1123
1124 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1125 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1126 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1127 &bad_send_wr);
1128 list = &qp_info->send_queue.list;
1129 } else {
1130 ret = 0;
1131 list = &qp_info->overflow_list;
1132 }
1133
1134 if (!ret) {
1135 qp_info->send_queue.count++;
1136 list_add_tail(&mad_send_wr->mad_list.list, list);
1137 }
1138 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1139 if (ret) {
1140 ib_dma_unmap_single(mad_agent->device,
1141 mad_send_wr->header_mapping,
1142 sge[0].length, DMA_TO_DEVICE);
1143 ib_dma_unmap_single(mad_agent->device,
1144 mad_send_wr->payload_mapping,
1145 sge[1].length, DMA_TO_DEVICE);
1146 }
1147 return ret;
1148 }
1149
1150 /*
1151 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1152 * with the registered client
1153 */
1154 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1155 struct ib_mad_send_buf **bad_send_buf)
1156 {
1157 struct ib_mad_agent_private *mad_agent_priv;
1158 struct ib_mad_send_buf *next_send_buf;
1159 struct ib_mad_send_wr_private *mad_send_wr;
1160 unsigned long flags;
1161 int ret = -EINVAL;
1162
1163 /* Walk list of send WRs and post each on send list */
1164 for (; send_buf; send_buf = next_send_buf) {
1165
1166 mad_send_wr = container_of(send_buf,
1167 struct ib_mad_send_wr_private,
1168 send_buf);
1169 mad_agent_priv = mad_send_wr->mad_agent_priv;
1170
1171 if (!send_buf->mad_agent->send_handler ||
1172 (send_buf->timeout_ms &&
1173 !send_buf->mad_agent->recv_handler)) {
1174 ret = -EINVAL;
1175 goto error;
1176 }
1177
1178 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1179 if (mad_agent_priv->agent.rmpp_version) {
1180 ret = -EINVAL;
1181 goto error;
1182 }
1183 }
1184
1185 /*
1186 * Save pointer to next work request to post in case the
1187 * current one completes, and the user modifies the work
1188 * request associated with the completion
1189 */
1190 next_send_buf = send_buf->next;
1191 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1192
1193 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1194 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1195 ret = handle_outgoing_dr_smp(mad_agent_priv,
1196 mad_send_wr);
1197 if (ret < 0) /* error */
1198 goto error;
1199 else if (ret == 1) /* locally consumed */
1200 continue;
1201 }
1202
1203 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1204 /* Timeout will be updated after send completes */
1205 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1206 mad_send_wr->max_retries = send_buf->retries;
1207 mad_send_wr->retries_left = send_buf->retries;
1208 send_buf->retries = 0;
1209 /* Reference for work request to QP + response */
1210 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1211 mad_send_wr->status = IB_WC_SUCCESS;
1212
1213 /* Reference MAD agent until send completes */
1214 atomic_inc(&mad_agent_priv->refcount);
1215 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1216 list_add_tail(&mad_send_wr->agent_list,
1217 &mad_agent_priv->send_list);
1218 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1219
1220 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1221 ret = ib_send_rmpp_mad(mad_send_wr);
1222 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1223 ret = ib_send_mad(mad_send_wr);
1224 } else
1225 ret = ib_send_mad(mad_send_wr);
1226 if (ret < 0) {
1227 /* Fail send request */
1228 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1229 list_del(&mad_send_wr->agent_list);
1230 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1231 atomic_dec(&mad_agent_priv->refcount);
1232 goto error;
1233 }
1234 }
1235 return 0;
1236 error:
1237 if (bad_send_buf)
1238 *bad_send_buf = send_buf;
1239 return ret;
1240 }
1241 EXPORT_SYMBOL(ib_post_send_mad);
1242
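/*
 * Illustrative sketch only: posting a prepared send buffer.  The caller owns
 * msg until its send handler runs, except on immediate failure, where it must
 * clean up itself.  example_post() and the timeout/retry values shown here
 * are placeholders.
 */
static int example_post(struct ib_mad_send_buf *msg, struct ib_ah *ah)
{
	int ret;

	msg->ah = ah;			/* address handle for the target */
	msg->timeout_ms = 100;		/* wait this long for a response */
	msg->retries = 3;		/* then retransmit up to 3 times */

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);	/* never reached the send handler */
	return ret;
}
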
1243 /*
1244 * ib_free_recv_mad - Returns data buffers used to receive
1245 * a MAD to the access layer
1246 */
1247 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1248 {
1249 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1250 struct ib_mad_private_header *mad_priv_hdr;
1251 struct ib_mad_private *priv;
1252 struct list_head free_list;
1253
1254 INIT_LIST_HEAD(&free_list);
1255 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1256
1257 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1258 &free_list, list) {
1259 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1260 recv_buf);
1261 mad_priv_hdr = container_of(mad_recv_wc,
1262 struct ib_mad_private_header,
1263 recv_wc);
1264 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1265 header);
1266 kfree(priv);
1267 }
1268 }
1269 EXPORT_SYMBOL(ib_free_recv_mad);
1270
1271 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1272 u8 rmpp_version,
1273 ib_mad_send_handler send_handler,
1274 ib_mad_recv_handler recv_handler,
1275 void *context)
1276 {
1277 return ERR_PTR(-EINVAL); /* XXX: for now */
1278 }
1279 EXPORT_SYMBOL(ib_redirect_mad_qp);
1280
1281 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1282 struct ib_wc *wc)
1283 {
1284 dev_err(&mad_agent->device->dev,
1285 "ib_process_mad_wc() not implemented yet\n");
1286 return 0;
1287 }
1288 EXPORT_SYMBOL(ib_process_mad_wc);
1289
1290 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1291 struct ib_mad_reg_req *mad_reg_req)
1292 {
1293 int i;
1294
1295 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1296 if ((*method)->agent[i]) {
1297 pr_err("Method %d already in use\n", i);
1298 return -EINVAL;
1299 }
1300 }
1301 return 0;
1302 }
1303
1304 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1305 {
1306 /* Allocate management method table */
1307 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1308 if (!*method) {
1309 pr_err("No memory for ib_mad_mgmt_method_table\n");
1310 return -ENOMEM;
1311 }
1312
1313 return 0;
1314 }
1315
1316 /*
1317 * Check to see if there are any methods still in use
1318 */
1319 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1320 {
1321 int i;
1322
1323 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1324 if (method->agent[i])
1325 return 1;
1326 return 0;
1327 }
1328
1329 /*
1330 * Check to see if there are any method tables for this class still in use
1331 */
1332 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1333 {
1334 int i;
1335
1336 for (i = 0; i < MAX_MGMT_CLASS; i++)
1337 if (class->method_table[i])
1338 return 1;
1339 return 0;
1340 }
1341
1342 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1343 {
1344 int i;
1345
1346 for (i = 0; i < MAX_MGMT_OUI; i++)
1347 if (vendor_class->method_table[i])
1348 return 1;
1349 return 0;
1350 }
1351
1352 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1353 const char *oui)
1354 {
1355 int i;
1356
1357 for (i = 0; i < MAX_MGMT_OUI; i++)
1358                 /* Is there a matching OUI for this vendor class? */
1359 if (!memcmp(vendor_class->oui[i], oui, 3))
1360 return i;
1361
1362 return -1;
1363 }
1364
1365 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1366 {
1367 int i;
1368
1369 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1370 if (vendor->vendor_class[i])
1371 return 1;
1372
1373 return 0;
1374 }
1375
1376 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1377 struct ib_mad_agent_private *agent)
1378 {
1379 int i;
1380
1381 /* Remove any methods for this mad agent */
1382 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1383 if (method->agent[i] == agent) {
1384 method->agent[i] = NULL;
1385 }
1386 }
1387 }
1388
1389 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1390 struct ib_mad_agent_private *agent_priv,
1391 u8 mgmt_class)
1392 {
1393 struct ib_mad_port_private *port_priv;
1394 struct ib_mad_mgmt_class_table **class;
1395 struct ib_mad_mgmt_method_table **method;
1396 int i, ret;
1397
1398 port_priv = agent_priv->qp_info->port_priv;
1399 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1400 if (!*class) {
1401 /* Allocate management class table for "new" class version */
1402 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1403 if (!*class) {
1404 dev_err(&agent_priv->agent.device->dev,
1405 "No memory for ib_mad_mgmt_class_table\n");
1406 ret = -ENOMEM;
1407 goto error1;
1408 }
1409
1410 /* Allocate method table for this management class */
1411 method = &(*class)->method_table[mgmt_class];
1412 if ((ret = allocate_method_table(method)))
1413 goto error2;
1414 } else {
1415 method = &(*class)->method_table[mgmt_class];
1416 if (!*method) {
1417 /* Allocate method table for this management class */
1418 if ((ret = allocate_method_table(method)))
1419 goto error1;
1420 }
1421 }
1422
1423 /* Now, make sure methods are not already in use */
1424 if (method_in_use(method, mad_reg_req))
1425 goto error3;
1426
1427 /* Finally, add in methods being registered */
1428 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1429 (*method)->agent[i] = agent_priv;
1430
1431 return 0;
1432
1433 error3:
1434 /* Remove any methods for this mad agent */
1435 remove_methods_mad_agent(*method, agent_priv);
1436 /* Now, check to see if there are any methods in use */
1437 if (!check_method_table(*method)) {
1438 /* If not, release management method table */
1439 kfree(*method);
1440 *method = NULL;
1441 }
1442 ret = -EINVAL;
1443 goto error1;
1444 error2:
1445 kfree(*class);
1446 *class = NULL;
1447 error1:
1448 return ret;
1449 }
1450
1451 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1452 struct ib_mad_agent_private *agent_priv)
1453 {
1454 struct ib_mad_port_private *port_priv;
1455 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1456 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1457 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1458 struct ib_mad_mgmt_method_table **method;
1459 int i, ret = -ENOMEM;
1460 u8 vclass;
1461
1462 /* "New" vendor (with OUI) class */
1463 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1464 port_priv = agent_priv->qp_info->port_priv;
1465 vendor_table = &port_priv->version[
1466 mad_reg_req->mgmt_class_version].vendor;
1467 if (!*vendor_table) {
1468 /* Allocate mgmt vendor class table for "new" class version */
1469 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1470 if (!vendor) {
1471 dev_err(&agent_priv->agent.device->dev,
1472 "No memory for ib_mad_mgmt_vendor_class_table\n");
1473 goto error1;
1474 }
1475
1476 *vendor_table = vendor;
1477 }
1478 if (!(*vendor_table)->vendor_class[vclass]) {
1479 /* Allocate table for this management vendor class */
1480 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1481 if (!vendor_class) {
1482 dev_err(&agent_priv->agent.device->dev,
1483 "No memory for ib_mad_mgmt_vendor_class\n");
1484 goto error2;
1485 }
1486
1487 (*vendor_table)->vendor_class[vclass] = vendor_class;
1488 }
1489 for (i = 0; i < MAX_MGMT_OUI; i++) {
1490                 /* Is there a matching OUI for this vendor class? */
1491 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1492 mad_reg_req->oui, 3)) {
1493 method = &(*vendor_table)->vendor_class[
1494 vclass]->method_table[i];
1495 BUG_ON(!*method);
1496 goto check_in_use;
1497 }
1498 }
1499 for (i = 0; i < MAX_MGMT_OUI; i++) {
1500                 /* OUI slot available? */
1501 if (!is_vendor_oui((*vendor_table)->vendor_class[
1502 vclass]->oui[i])) {
1503 method = &(*vendor_table)->vendor_class[
1504 vclass]->method_table[i];
1505 BUG_ON(*method);
1506 /* Allocate method table for this OUI */
1507 if ((ret = allocate_method_table(method)))
1508 goto error3;
1509 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1510 mad_reg_req->oui, 3);
1511 goto check_in_use;
1512 }
1513 }
1514 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1515 goto error3;
1516
1517 check_in_use:
1518 /* Now, make sure methods are not already in use */
1519 if (method_in_use(method, mad_reg_req))
1520 goto error4;
1521
1522 /* Finally, add in methods being registered */
1523 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1524 (*method)->agent[i] = agent_priv;
1525
1526 return 0;
1527
1528 error4:
1529 /* Remove any methods for this mad agent */
1530 remove_methods_mad_agent(*method, agent_priv);
1531 /* Now, check to see if there are any methods in use */
1532 if (!check_method_table(*method)) {
1533 /* If not, release management method table */
1534 kfree(*method);
1535 *method = NULL;
1536 }
1537 ret = -EINVAL;
1538 error3:
1539 if (vendor_class) {
1540 (*vendor_table)->vendor_class[vclass] = NULL;
1541 kfree(vendor_class);
1542 }
1543 error2:
1544 if (vendor) {
1545 *vendor_table = NULL;
1546 kfree(vendor);
1547 }
1548 error1:
1549 return ret;
1550 }
1551
1552 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1553 {
1554 struct ib_mad_port_private *port_priv;
1555 struct ib_mad_mgmt_class_table *class;
1556 struct ib_mad_mgmt_method_table *method;
1557 struct ib_mad_mgmt_vendor_class_table *vendor;
1558 struct ib_mad_mgmt_vendor_class *vendor_class;
1559 int index;
1560 u8 mgmt_class;
1561
1562 /*
1563  * Was a MAD registration request supplied
1564  * with the original registration?
1565 */
1566 if (!agent_priv->reg_req) {
1567 goto out;
1568 }
1569
1570 port_priv = agent_priv->qp_info->port_priv;
1571 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1572 class = port_priv->version[
1573 agent_priv->reg_req->mgmt_class_version].class;
1574 if (!class)
1575 goto vendor_check;
1576
1577 method = class->method_table[mgmt_class];
1578 if (method) {
1579 /* Remove any methods for this mad agent */
1580 remove_methods_mad_agent(method, agent_priv);
1581 /* Now, check to see if there are any methods still in use */
1582 if (!check_method_table(method)) {
1583 /* If not, release management method table */
1584 kfree(method);
1585 class->method_table[mgmt_class] = NULL;
1586 /* Any management classes left ? */
1587 if (!check_class_table(class)) {
1588 /* If not, release management class table */
1589 kfree(class);
1590 port_priv->version[
1591 agent_priv->reg_req->
1592 mgmt_class_version].class = NULL;
1593 }
1594 }
1595 }
1596
1597 vendor_check:
1598 if (!is_vendor_class(mgmt_class))
1599 goto out;
1600
1601 /* normalize mgmt_class to vendor range 2 */
1602 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1603 vendor = port_priv->version[
1604 agent_priv->reg_req->mgmt_class_version].vendor;
1605
1606 if (!vendor)
1607 goto out;
1608
1609 vendor_class = vendor->vendor_class[mgmt_class];
1610 if (vendor_class) {
1611 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1612 if (index < 0)
1613 goto out;
1614 method = vendor_class->method_table[index];
1615 if (method) {
1616 /* Remove any methods for this mad agent */
1617 remove_methods_mad_agent(method, agent_priv);
1618 /*
1619 * Now, check to see if there are
1620 * any methods still in use
1621 */
1622 if (!check_method_table(method)) {
1623 /* If not, release management method table */
1624 kfree(method);
1625 vendor_class->method_table[index] = NULL;
1626 memset(vendor_class->oui[index], 0, 3);
1627 /* Any OUIs left ? */
1628 if (!check_vendor_class(vendor_class)) {
1629 /* If not, release vendor class table */
1630 kfree(vendor_class);
1631 vendor->vendor_class[mgmt_class] = NULL;
1632 /* Any other vendor classes left ? */
1633 if (!check_vendor_table(vendor)) {
1634 kfree(vendor);
1635 port_priv->version[
1636 agent_priv->reg_req->
1637 mgmt_class_version].
1638 vendor = NULL;
1639 }
1640 }
1641 }
1642 }
1643 }
1644
1645 out:
1646 return;
1647 }
1648
1649 static struct ib_mad_agent_private *
1650 find_mad_agent(struct ib_mad_port_private *port_priv,
1651 const struct ib_mad_hdr *mad_hdr)
1652 {
1653 struct ib_mad_agent_private *mad_agent = NULL;
1654 unsigned long flags;
1655
1656 spin_lock_irqsave(&port_priv->reg_lock, flags);
1657 if (ib_response_mad(mad_hdr)) {
1658 u32 hi_tid;
1659 struct ib_mad_agent_private *entry;
1660
1661 /*
1662 * Routing is based on high 32 bits of transaction ID
1663 * of MAD.
1664 */
1665 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1666 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1667 if (entry->agent.hi_tid == hi_tid) {
1668 mad_agent = entry;
1669 break;
1670 }
1671 }
1672 } else {
1673 struct ib_mad_mgmt_class_table *class;
1674 struct ib_mad_mgmt_method_table *method;
1675 struct ib_mad_mgmt_vendor_class_table *vendor;
1676 struct ib_mad_mgmt_vendor_class *vendor_class;
1677 const struct ib_vendor_mad *vendor_mad;
1678 int index;
1679
1680 /*
1681 * Routing is based on version, class, and method
1682 * For "newer" vendor MADs, also based on OUI
1683 */
1684 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1685 goto out;
1686 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1687 class = port_priv->version[
1688 mad_hdr->class_version].class;
1689 if (!class)
1690 goto out;
1691 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1692 IB_MGMT_MAX_METHODS)
1693 goto out;
1694 method = class->method_table[convert_mgmt_class(
1695 mad_hdr->mgmt_class)];
1696 if (method)
1697 mad_agent = method->agent[mad_hdr->method &
1698 ~IB_MGMT_METHOD_RESP];
1699 } else {
1700 vendor = port_priv->version[
1701 mad_hdr->class_version].vendor;
1702 if (!vendor)
1703 goto out;
1704 vendor_class = vendor->vendor_class[vendor_class_index(
1705 mad_hdr->mgmt_class)];
1706 if (!vendor_class)
1707 goto out;
1708 /* Find matching OUI */
1709 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1710 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1711 if (index == -1)
1712 goto out;
1713 method = vendor_class->method_table[index];
1714 if (method) {
1715 mad_agent = method->agent[mad_hdr->method &
1716 ~IB_MGMT_METHOD_RESP];
1717 }
1718 }
1719 }
1720
1721 if (mad_agent) {
1722 if (mad_agent->agent.recv_handler)
1723 atomic_inc(&mad_agent->refcount);
1724 else {
1725 dev_notice(&port_priv->device->dev,
1726 "No receive handler for client %p on port %d\n",
1727 &mad_agent->agent, port_priv->port_num);
1728 mad_agent = NULL;
1729 }
1730 }
1731 out:
1732 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1733
1734 return mad_agent;
1735 }
1736
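/*
 * Example of the routing above: an agent whose hi_tid is 5 sends requests
 * whose 64-bit TIDs look like 0x00000005xxxxxxxx, so any response carrying
 * such a TID is handed straight back to that agent, while requests are
 * instead dispatched through the class/method (and, for vendor classes, OUI)
 * tables built at registration time.
 */
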
1737 static int validate_mad(const struct ib_mad_hdr *mad_hdr, u32 qp_num)
1738 {
1739 int valid = 0;
1740
1741 /* Make sure MAD base version is understood */
1742 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION) {
1743 pr_err("MAD received with unsupported base version %d\n",
1744 mad_hdr->base_version);
1745 goto out;
1746 }
1747
1748 /* Filter SMI packets sent to other than QP0 */
1749 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1750 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1751 if (qp_num == 0)
1752 valid = 1;
1753 } else {
1754 /* Filter GSI packets sent to QP0 */
1755 if (qp_num != 0)
1756 valid = 1;
1757 }
1758
1759 out:
1760 return valid;
1761 }
1762
1763 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1764 const struct ib_mad_hdr *mad_hdr)
1765 {
1766 struct ib_rmpp_mad *rmpp_mad;
1767
1768 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1769 return !mad_agent_priv->agent.rmpp_version ||
1770 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1771 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1772 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1773 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1774 }
1775
1776 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1777 const struct ib_mad_recv_wc *rwc)
1778 {
1779 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1780 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1781 }
1782
1783 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1784 const struct ib_mad_send_wr_private *wr,
1785 const struct ib_mad_recv_wc *rwc )
1786 {
1787 struct ib_ah_attr attr;
1788 u8 send_resp, rcv_resp;
1789 union ib_gid sgid;
1790 struct ib_device *device = mad_agent_priv->agent.device;
1791 u8 port_num = mad_agent_priv->agent.port_num;
1792 u8 lmc;
1793
1794 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1795 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1796
1797 if (send_resp == rcv_resp)
1798 /* both requests, or both responses. GIDs different */
1799 return 0;
1800
1801 if (ib_query_ah(wr->send_buf.ah, &attr))
1802 /* Assume not equal, to avoid false positives. */
1803 return 0;
1804
1805 if (!!(attr.ah_flags & IB_AH_GRH) !=
1806 !!(rwc->wc->wc_flags & IB_WC_GRH))
1807 /* one has GID, other does not. Assume different */
1808 return 0;
1809
1810 if (!send_resp && rcv_resp) {
1811                 /* Sent a request, received a response. */
1812 if (!(attr.ah_flags & IB_AH_GRH)) {
1813 if (ib_get_cached_lmc(device, port_num, &lmc))
1814 return 0;
1815 return (!lmc || !((attr.src_path_bits ^
1816 rwc->wc->dlid_path_bits) &
1817 ((1 << lmc) - 1)));
1818 } else {
1819 if (ib_get_cached_gid(device, port_num,
1820 attr.grh.sgid_index, &sgid))
1821 return 0;
1822 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1823 16);
1824 }
1825 }
1826
1827 if (!(attr.ah_flags & IB_AH_GRH))
1828 return attr.dlid == rwc->wc->slid;
1829 else
1830 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1831 16);
1832 }
1833
1834 static inline int is_direct(u8 class)
1835 {
1836 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1837 }
1838
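/*
 * Find the send work request that a received response completes.  The
 * wait list is searched first; the send list is also searched because a
 * response can arrive before the send completion has been processed.
 * A match requires the same TID and management class and, except for
 * directed route MADs, the same GID/path.  Returns NULL if no match is
 * found or the matching request was canceled.
 */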
1839 struct ib_mad_send_wr_private*
1840 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1841 const struct ib_mad_recv_wc *wc)
1842 {
1843 struct ib_mad_send_wr_private *wr;
1844 const struct ib_mad_hdr *mad_hdr;
1845
1846 mad_hdr = &wc->recv_buf.mad->mad_hdr;
1847
1848 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1849 if ((wr->tid == mad_hdr->tid) &&
1850 rcv_has_same_class(wr, wc) &&
1851 /*
1852 * Don't check GID for direct routed MADs.
1853 * These might have permissive LIDs.
1854 */
1855 (is_direct(mad_hdr->mgmt_class) ||
1856 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1857 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1858 }
1859
1860 /*
1861 * It's possible to receive the response before we've
1862 * been notified that the send has completed
1863 */
1864 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1865 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1866 wr->tid == mad_hdr->tid &&
1867 wr->timeout &&
1868 rcv_has_same_class(wr, wc) &&
1869 /*
1870 * Don't check GID for direct routed MADs.
1871 * These might have permissive LIDs.
1872 */
1873 (is_direct(mad_hdr->mgmt_class) ||
1874 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1875 /* Verify request has not been canceled */
1876 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1877 }
1878 return NULL;
1879 }
1880
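/*
 * A response has been received for this request: clear its timeout and,
 * unless a send completion is still outstanding, move it to the done list.
 */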
1881 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1882 {
1883 mad_send_wr->timeout = 0;
1884 if (mad_send_wr->refcount == 1)
1885 list_move_tail(&mad_send_wr->agent_list,
1886 &mad_send_wr->mad_agent_priv->done_list);
1887 }
1888
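/*
 * Deliver a received MAD to its agent.  Kernel RMPP agents reassemble
 * multi-segment transfers first; responses are then matched to the
 * originating request so both can be completed, while unsolicited MADs
 * go straight to the client's recv_handler.
 */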
1889 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1890 struct ib_mad_recv_wc *mad_recv_wc)
1891 {
1892 struct ib_mad_send_wr_private *mad_send_wr;
1893 struct ib_mad_send_wc mad_send_wc;
1894 unsigned long flags;
1895
1896 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1897 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1898 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1899 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1900 mad_recv_wc);
1901 if (!mad_recv_wc) {
1902 deref_mad_agent(mad_agent_priv);
1903 return;
1904 }
1905 }
1906
1907 /* Complete corresponding request */
1908 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1909 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1910 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1911 if (!mad_send_wr) {
1912 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1913 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1914 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1915 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1916 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1917 /* User-space RMPP is in effect and this is
1918 * an active RMPP MAD; hand it to the client.
1919 */
1920 mad_recv_wc->wc->wr_id = 0;
1921 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1922 mad_recv_wc);
1923 atomic_dec(&mad_agent_priv->refcount);
1924 } else {
1925 /* Not user-space RMPP; revert to normal
1926 * behavior and drop the MAD. */
1927 ib_free_recv_mad(mad_recv_wc);
1928 deref_mad_agent(mad_agent_priv);
1929 return;
1930 }
1931 } else {
1932 ib_mark_mad_done(mad_send_wr);
1933 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1934
1935 /* Defined behavior is to complete response before request */
1936 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1937 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1938 mad_recv_wc);
1939 atomic_dec(&mad_agent_priv->refcount);
1940
1941 mad_send_wc.status = IB_WC_SUCCESS;
1942 mad_send_wc.vendor_err = 0;
1943 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1944 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1945 }
1946 } else {
1947 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1948 mad_recv_wc);
1949 deref_mad_agent(mad_agent_priv);
1950 }
1951 }
1952
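/*
 * Apply the directed route SMP checks to a received SMP.  Returns
 * IB_SMI_DISCARD if the packet must be dropped; on a switch, SMPs that
 * should be forwarded are sent back out through the outgoing port and
 * discarded locally.
 */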
1953 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
1954 const struct ib_mad_qp_info *qp_info,
1955 const struct ib_wc *wc,
1956 int port_num,
1957 struct ib_mad_private *recv,
1958 struct ib_mad_private *response)
1959 {
1960 enum smi_forward_action retsmi;
1961 struct ib_smp *smp = (struct ib_smp *)recv->mad;
1962
1963 if (smi_handle_dr_smp_recv(smp,
1964 port_priv->device->node_type,
1965 port_num,
1966 port_priv->device->phys_port_cnt) ==
1967 IB_SMI_DISCARD)
1968 return IB_SMI_DISCARD;
1969
1970 retsmi = smi_check_forward_dr_smp(smp);
1971 if (retsmi == IB_SMI_LOCAL)
1972 return IB_SMI_HANDLE;
1973
1974 if (retsmi == IB_SMI_SEND) { /* don't forward */
1975 if (smi_handle_dr_smp_send(smp,
1976 port_priv->device->node_type,
1977 port_num) == IB_SMI_DISCARD)
1978 return IB_SMI_DISCARD;
1979
1980 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
1981 return IB_SMI_DISCARD;
1982 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1983 /* forward case for switches */
1984 memcpy(response, recv, mad_priv_size(response));
1985 response->header.recv_wc.wc = &response->header.wc;
1986 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
1987 response->header.recv_wc.recv_buf.grh = &response->grh;
1988
1989 agent_send_response((const struct ib_mad_hdr *)response->mad,
1990 &response->grh, wc,
1991 port_priv->device,
1992 smi_get_fwd_port(smp),
1993 qp_info->qp->qp_num,
1994 response->mad_size);
1995
1996 return IB_SMI_DISCARD;
1997 }
1998 return IB_SMI_HANDLE;
1999 }
2000
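/*
 * No agent claimed this Get/Set request, so build a GetResp carrying an
 * "unsupported method/attribute" status so the requester is not left
 * waiting for a timeout.  Returns false for all other methods.
 */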
2001 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2002 struct ib_mad_private *response)
2003 {
2004 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2005 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2006
2007 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2008 recv_hdr->method == IB_MGMT_METHOD_SET) {
2009 memcpy(response, recv, mad_priv_size(response));
2010 response->header.recv_wc.wc = &response->header.wc;
2011 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2012 response->header.recv_wc.recv_buf.grh = &response->grh;
2013 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2014 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2015 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2016 resp_hdr->status |= IB_SMP_DIRECTION;
2017
2018 return true;
2019 } else {
2020 return false;
2021 }
2022 }
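
/*
 * Completion handler for a receive work request: unmap the buffer,
 * validate the MAD, apply SMP handling and the driver's process_mad
 * hook, then hand the MAD to the matching agent or, for unclaimed
 * Get/Set requests, send back an error response.  A fresh receive
 * buffer is always reposted before returning.
 */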
2023 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
2024 struct ib_wc *wc)
2025 {
2026 struct ib_mad_qp_info *qp_info;
2027 struct ib_mad_private_header *mad_priv_hdr;
2028 struct ib_mad_private *recv, *response = NULL;
2029 struct ib_mad_list_head *mad_list;
2030 struct ib_mad_agent_private *mad_agent;
2031 int port_num;
2032 int ret = IB_MAD_RESULT_SUCCESS;
2033
2034 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2035 qp_info = mad_list->mad_queue->qp_info;
2036 dequeue_mad(mad_list);
2037
2038 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2039 mad_list);
2040 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2041 ib_dma_unmap_single(port_priv->device,
2042 recv->header.mapping,
2043 mad_priv_dma_size(recv),
2044 DMA_FROM_DEVICE);
2045
2046 /* Setup MAD receive work completion from "normal" work completion */
2047 recv->header.wc = *wc;
2048 recv->header.recv_wc.wc = &recv->header.wc;
2049 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2050 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2051 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2052
2053 if (atomic_read(&qp_info->snoop_count))
2054 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2055
2056 /* Validate MAD */
2057 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info->qp->qp_num))
2058 goto out;
2059
2060 response = alloc_mad_private(recv->mad_size, GFP_ATOMIC);
2061 if (!response) {
2062 dev_err(&port_priv->device->dev,
2063 "ib_mad_recv_done_handler no memory for response buffer\n");
2064 goto out;
2065 }
2066
2067 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
2068 port_num = wc->port_num;
2069 else
2070 port_num = port_priv->port_num;
2071
2072 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2073 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2074 if (handle_ib_smi(port_priv, qp_info, wc, port_num, recv,
2075 response)
2076 == IB_SMI_DISCARD)
2077 goto out;
2078 }
2079
2080 /* Give driver "right of first refusal" on incoming MAD */
2081 if (port_priv->device->process_mad) {
2082 ret = port_priv->device->process_mad(port_priv->device, 0,
2083 port_priv->port_num,
2084 wc, &recv->grh,
2085 (const struct ib_mad *)recv->mad,
2086 (struct ib_mad *)response->mad);
2087 if (ret & IB_MAD_RESULT_SUCCESS) {
2088 if (ret & IB_MAD_RESULT_CONSUMED)
2089 goto out;
2090 if (ret & IB_MAD_RESULT_REPLY) {
2091 agent_send_response((const struct ib_mad_hdr *)response->mad,
2092 &recv->grh, wc,
2093 port_priv->device,
2094 port_num,
2095 qp_info->qp->qp_num,
2096 response->mad_size);
2097 goto out;
2098 }
2099 }
2100 }
2101
2102 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2103 if (mad_agent) {
2104 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2105 /*
2106 * recv is freed in the error paths of ib_mad_complete_recv()
2107 * or released later through the client's recv_handler.
2108 */
2109 recv = NULL;
2110 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2111 generate_unmatched_resp(recv, response)) {
2112 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2113 port_priv->device, port_num,
2114 qp_info->qp->qp_num, response->mad_size);
2115 }
2116
2117 out:
2118 /* Post another receive request for this QP */
2119 if (response) {
2120 ib_mad_post_receive_mads(qp_info, response);
2121 kfree(recv);
2122 } else
2123 ib_mad_post_receive_mads(qp_info, recv);
2124 }
2125
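/*
 * Recompute the timeout work after the wait list has changed: cancel it
 * when the list is empty, or pull it in when the new head entry expires
 * sooner than the currently scheduled timeout.
 */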
2126 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2127 {
2128 struct ib_mad_send_wr_private *mad_send_wr;
2129 unsigned long delay;
2130
2131 if (list_empty(&mad_agent_priv->wait_list)) {
2132 cancel_delayed_work(&mad_agent_priv->timed_work);
2133 } else {
2134 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2135 struct ib_mad_send_wr_private,
2136 agent_list);
2137
2138 if (time_after(mad_agent_priv->timeout,
2139 mad_send_wr->timeout)) {
2140 mad_agent_priv->timeout = mad_send_wr->timeout;
2141 delay = mad_send_wr->timeout - jiffies;
2142 if ((long)delay <= 0)
2143 delay = 1;
2144 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2145 &mad_agent_priv->timed_work, delay);
2146 }
2147 }
2148 }
2149
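/*
 * Move a request that is waiting for a response onto the wait list,
 * keeping the list ordered by absolute timeout, and reschedule the
 * timeout work if this entry becomes the earliest to expire.
 */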
2150 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2151 {
2152 struct ib_mad_agent_private *mad_agent_priv;
2153 struct ib_mad_send_wr_private *temp_mad_send_wr;
2154 struct list_head *list_item;
2155 unsigned long delay;
2156
2157 mad_agent_priv = mad_send_wr->mad_agent_priv;
2158 list_del(&mad_send_wr->agent_list);
2159
2160 delay = mad_send_wr->timeout;
2161 mad_send_wr->timeout += jiffies;
2162
2163 if (delay) {
2164 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2165 temp_mad_send_wr = list_entry(list_item,
2166 struct ib_mad_send_wr_private,
2167 agent_list);
2168 if (time_after(mad_send_wr->timeout,
2169 temp_mad_send_wr->timeout))
2170 break;
2171 }
2172 }
2173 else
2174 list_item = &mad_agent_priv->wait_list;
2175 list_add(&mad_send_wr->agent_list, list_item);
2176
2177 /* Reschedule a work item if we have a shorter timeout */
2178 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2179 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2180 &mad_agent_priv->timed_work, delay);
2181 }
2182
2183 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2184 int timeout_ms)
2185 {
2186 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2187 wait_for_response(mad_send_wr);
2188 }
2189
2190 /*
2191 * Process a send work completion
2192 */
2193 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2194 struct ib_mad_send_wc *mad_send_wc)
2195 {
2196 struct ib_mad_agent_private *mad_agent_priv;
2197 unsigned long flags;
2198 int ret;
2199
2200 mad_agent_priv = mad_send_wr->mad_agent_priv;
2201 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2202 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2203 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2204 if (ret == IB_RMPP_RESULT_CONSUMED)
2205 goto done;
2206 } else
2207 ret = IB_RMPP_RESULT_UNHANDLED;
2208
2209 if (mad_send_wc->status != IB_WC_SUCCESS &&
2210 mad_send_wr->status == IB_WC_SUCCESS) {
2211 mad_send_wr->status = mad_send_wc->status;
2212 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2213 }
2214
2215 if (--mad_send_wr->refcount > 0) {
2216 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2217 mad_send_wr->status == IB_WC_SUCCESS) {
2218 wait_for_response(mad_send_wr);
2219 }
2220 goto done;
2221 }
2222
2223 /* Remove send from MAD agent and notify client of completion */
2224 list_del(&mad_send_wr->agent_list);
2225 adjust_timeout(mad_agent_priv);
2226 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2227
2228 if (mad_send_wr->status != IB_WC_SUCCESS)
2229 mad_send_wc->status = mad_send_wr->status;
2230 if (ret == IB_RMPP_RESULT_INTERNAL)
2231 ib_rmpp_send_handler(mad_send_wc);
2232 else
2233 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2234 mad_send_wc);
2235
2236 /* Release reference on agent taken when sending */
2237 deref_mad_agent(mad_agent_priv);
2238 return;
2239 done:
2240 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2241 }
2242
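/*
 * Completion handler for a send work request: unmap its buffers,
 * promote a queued overflow send onto the hardware send queue if room
 * has freed up, and complete the finished request.  If reposting the
 * queued send fails, it is completed with IB_WC_LOC_QP_OP_ERR instead.
 */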
2243 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2244 struct ib_wc *wc)
2245 {
2246 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2247 struct ib_mad_list_head *mad_list;
2248 struct ib_mad_qp_info *qp_info;
2249 struct ib_mad_queue *send_queue;
2250 struct ib_send_wr *bad_send_wr;
2251 struct ib_mad_send_wc mad_send_wc;
2252 unsigned long flags;
2253 int ret;
2254
2255 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2256 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2257 mad_list);
2258 send_queue = mad_list->mad_queue;
2259 qp_info = send_queue->qp_info;
2260
2261 retry:
2262 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2263 mad_send_wr->header_mapping,
2264 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2265 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2266 mad_send_wr->payload_mapping,
2267 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2268 queued_send_wr = NULL;
2269 spin_lock_irqsave(&send_queue->lock, flags);
2270 list_del(&mad_list->list);
2271
2272 /* Move queued send to the send queue */
2273 if (send_queue->count-- > send_queue->max_active) {
2274 mad_list = container_of(qp_info->overflow_list.next,
2275 struct ib_mad_list_head, list);
2276 queued_send_wr = container_of(mad_list,
2277 struct ib_mad_send_wr_private,
2278 mad_list);
2279 list_move_tail(&mad_list->list, &send_queue->list);
2280 }
2281 spin_unlock_irqrestore(&send_queue->lock, flags);
2282
2283 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2284 mad_send_wc.status = wc->status;
2285 mad_send_wc.vendor_err = wc->vendor_err;
2286 if (atomic_read(&qp_info->snoop_count))
2287 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2288 IB_MAD_SNOOP_SEND_COMPLETIONS);
2289 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2290
2291 if (queued_send_wr) {
2292 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2293 &bad_send_wr);
2294 if (ret) {
2295 dev_err(&port_priv->device->dev,
2296 "ib_post_send failed: %d\n", ret);
2297 mad_send_wr = queued_send_wr;
2298 wc->status = IB_WC_LOC_QP_OP_ERR;
2299 goto retry;
2300 }
2301 }
2302 }
2303
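/*
 * Mark every send still on the send queue so that its flush completion
 * is reposted by mad_error_handler() rather than reported as a failure.
 */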
2304 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2305 {
2306 struct ib_mad_send_wr_private *mad_send_wr;
2307 struct ib_mad_list_head *mad_list;
2308 unsigned long flags;
2309
2310 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2311 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2312 mad_send_wr = container_of(mad_list,
2313 struct ib_mad_send_wr_private,
2314 mad_list);
2315 mad_send_wr->retry = 1;
2316 }
2317 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2318 }
2319
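/*
 * Handle a failed work completion.  Receive errors mean the QP is in
 * the error state and cleanup happens elsewhere.  Flushed sends marked
 * for retry are reposted; other send errors move the QP from SQE back
 * to RTS, mark the remaining sends for retry and fail the offending
 * send.
 */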
2320 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2321 struct ib_wc *wc)
2322 {
2323 struct ib_mad_list_head *mad_list;
2324 struct ib_mad_qp_info *qp_info;
2325 struct ib_mad_send_wr_private *mad_send_wr;
2326 int ret;
2327
2328 /* Determine if failure was a send or receive */
2329 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2330 qp_info = mad_list->mad_queue->qp_info;
2331 if (mad_list->mad_queue == &qp_info->recv_queue)
2332 /*
2333 * Receive errors indicate that the QP has entered the error
2334 * state - error handling/shutdown code will cleanup
2335 */
2336 return;
2337
2338 /*
2339 * Send errors will transition the QP to SQE - move
2340 * QP to RTS and repost flushed work requests
2341 */
2342 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2343 mad_list);
2344 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2345 if (mad_send_wr->retry) {
2346 /* Repost send */
2347 struct ib_send_wr *bad_send_wr;
2348
2349 mad_send_wr->retry = 0;
2350 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2351 &bad_send_wr);
2352 if (ret)
2353 ib_mad_send_done_handler(port_priv, wc);
2354 } else
2355 ib_mad_send_done_handler(port_priv, wc);
2356 } else {
2357 struct ib_qp_attr *attr;
2358
2359 /* Transition QP to RTS and fail offending send */
2360 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2361 if (attr) {
2362 attr->qp_state = IB_QPS_RTS;
2363 attr->cur_qp_state = IB_QPS_SQE;
2364 ret = ib_modify_qp(qp_info->qp, attr,
2365 IB_QP_STATE | IB_QP_CUR_STATE);
2366 kfree(attr);
2367 if (ret)
2368 dev_err(&port_priv->device->dev,
2369 "mad_error_handler - ib_modify_qp to RTS : %d\n",
2370 ret);
2371 else
2372 mark_sends_for_retry(qp_info);
2373 }
2374 ib_mad_send_done_handler(port_priv, wc);
2375 }
2376 }
2377
2378 /*
2379 * IB MAD completion callback
2380 */
2381 static void ib_mad_completion_handler(struct work_struct *work)
2382 {
2383 struct ib_mad_port_private *port_priv;
2384 struct ib_wc wc;
2385
2386 port_priv = container_of(work, struct ib_mad_port_private, work);
2387 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2388
2389 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2390 if (wc.status == IB_WC_SUCCESS) {
2391 switch (wc.opcode) {
2392 case IB_WC_SEND:
2393 ib_mad_send_done_handler(port_priv, &wc);
2394 break;
2395 case IB_WC_RECV:
2396 ib_mad_recv_done_handler(port_priv, &wc);
2397 break;
2398 default:
2399 BUG_ON(1);
2400 break;
2401 }
2402 } else
2403 mad_error_handler(port_priv, &wc);
2404 }
2405 }
2406
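/*
 * Cancel all outstanding sends for an agent: sends still on the send
 * list are marked flushed and will be reported by their send
 * completions, while requests waiting for responses are reported here
 * with IB_WC_WR_FLUSH_ERR.
 */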
2407 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2408 {
2409 unsigned long flags;
2410 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2411 struct ib_mad_send_wc mad_send_wc;
2412 struct list_head cancel_list;
2413
2414 INIT_LIST_HEAD(&cancel_list);
2415
2416 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2417 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2418 &mad_agent_priv->send_list, agent_list) {
2419 if (mad_send_wr->status == IB_WC_SUCCESS) {
2420 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2421 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2422 }
2423 }
2424
2425 /* Empty wait list to prevent receives from finding a request */
2426 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2427 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2428
2429 /* Report all cancelled requests */
2430 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2431 mad_send_wc.vendor_err = 0;
2432
2433 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2434 &cancel_list, agent_list) {
2435 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2436 list_del(&mad_send_wr->agent_list);
2437 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2438 &mad_send_wc);
2439 atomic_dec(&mad_agent_priv->refcount);
2440 }
2441 }
2442
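/*
 * Look up the send work request backing a client's send_buf, searching
 * the wait list first and then the (possibly still active) send list.
 */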
2443 static struct ib_mad_send_wr_private*
2444 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2445 struct ib_mad_send_buf *send_buf)
2446 {
2447 struct ib_mad_send_wr_private *mad_send_wr;
2448
2449 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2450 agent_list) {
2451 if (&mad_send_wr->send_buf == send_buf)
2452 return mad_send_wr;
2453 }
2454
2455 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2456 agent_list) {
2457 if (is_rmpp_data_mad(mad_agent_priv,
2458 mad_send_wr->send_buf.mad) &&
2459 &mad_send_wr->send_buf == send_buf)
2460 return mad_send_wr;
2461 }
2462 return NULL;
2463 }
2464
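/*
 * Change the timeout of an outstanding send identified by its send_buf.
 * A timeout of zero cancels the request, which is then reported through
 * the normal send completion path as IB_WC_WR_FLUSH_ERR.
 */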
2465 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2466 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2467 {
2468 struct ib_mad_agent_private *mad_agent_priv;
2469 struct ib_mad_send_wr_private *mad_send_wr;
2470 unsigned long flags;
2471 int active;
2472
2473 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2474 agent);
2475 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2476 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2477 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2478 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2479 return -EINVAL;
2480 }
2481
2482 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2483 if (!timeout_ms) {
2484 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2485 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2486 }
2487
2488 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2489 if (active)
2490 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2491 else
2492 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2493
2494 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2495 return 0;
2496 }
2497 EXPORT_SYMBOL(ib_modify_mad);
2498
2499 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2500 struct ib_mad_send_buf *send_buf)
2501 {
2502 ib_modify_mad(mad_agent, send_buf, 0);
2503 }
2504 EXPORT_SYMBOL(ib_cancel_mad);
2505
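/*
 * Complete sends that were handled entirely locally without going to
 * the wire: build a synthetic receive completion for the destination
 * agent, then report the send completion to the sender.
 */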
2506 static void local_completions(struct work_struct *work)
2507 {
2508 struct ib_mad_agent_private *mad_agent_priv;
2509 struct ib_mad_local_private *local;
2510 struct ib_mad_agent_private *recv_mad_agent;
2511 unsigned long flags;
2512 int free_mad;
2513 struct ib_wc wc;
2514 struct ib_mad_send_wc mad_send_wc;
2515
2516 mad_agent_priv =
2517 container_of(work, struct ib_mad_agent_private, local_work);
2518
2519 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2520 while (!list_empty(&mad_agent_priv->local_list)) {
2521 local = list_entry(mad_agent_priv->local_list.next,
2522 struct ib_mad_local_private,
2523 completion_list);
2524 list_del(&local->completion_list);
2525 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2526 free_mad = 0;
2527 if (local->mad_priv) {
2528 recv_mad_agent = local->recv_mad_agent;
2529 if (!recv_mad_agent) {
2530 dev_err(&mad_agent_priv->agent.device->dev,
2531 "No receive MAD agent for local completion\n");
2532 free_mad = 1;
2533 goto local_send_completion;
2534 }
2535
2536 /*
2537 * Defined behavior is to complete response
2538 * before request
2539 */
2540 build_smp_wc(recv_mad_agent->agent.qp,
2541 (unsigned long) local->mad_send_wr,
2542 be16_to_cpu(IB_LID_PERMISSIVE),
2543 0, recv_mad_agent->agent.port_num, &wc);
2544
2545 local->mad_priv->header.recv_wc.wc = &wc;
2546 local->mad_priv->header.recv_wc.mad_len =
2547 sizeof(struct ib_mad);
2548 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2549 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2550 &local->mad_priv->header.recv_wc.rmpp_list);
2551 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2552 local->mad_priv->header.recv_wc.recv_buf.mad =
2553 (struct ib_mad *)local->mad_priv->mad;
2554 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2555 snoop_recv(recv_mad_agent->qp_info,
2556 &local->mad_priv->header.recv_wc,
2557 IB_MAD_SNOOP_RECVS);
2558 recv_mad_agent->agent.recv_handler(
2559 &recv_mad_agent->agent,
2560 &local->mad_priv->header.recv_wc);
2561 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2562 atomic_dec(&recv_mad_agent->refcount);
2563 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2564 }
2565
2566 local_send_completion:
2567 /* Complete send */
2568 mad_send_wc.status = IB_WC_SUCCESS;
2569 mad_send_wc.vendor_err = 0;
2570 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2571 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2572 snoop_send(mad_agent_priv->qp_info,
2573 &local->mad_send_wr->send_buf,
2574 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2575 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2576 &mad_send_wc);
2577
2578 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2579 atomic_dec(&mad_agent_priv->refcount);
2580 if (free_mad)
2581 kfree(local->mad_priv);
2582 kfree(local);
2583 }
2584 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2585 }
2586
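/*
 * Resend a timed out request if it has retries left, going through the
 * RMPP retry path for kernel RMPP agents; returns -ETIMEDOUT once the
 * retries are exhausted.
 */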
2587 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2588 {
2589 int ret;
2590
2591 if (!mad_send_wr->retries_left)
2592 return -ETIMEDOUT;
2593
2594 mad_send_wr->retries_left--;
2595 mad_send_wr->send_buf.retries++;
2596
2597 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2598
2599 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2600 ret = ib_retry_rmpp(mad_send_wr);
2601 switch (ret) {
2602 case IB_RMPP_RESULT_UNHANDLED:
2603 ret = ib_send_mad(mad_send_wr);
2604 break;
2605 case IB_RMPP_RESULT_CONSUMED:
2606 ret = 0;
2607 break;
2608 default:
2609 ret = -ECOMM;
2610 break;
2611 }
2612 } else
2613 ret = ib_send_mad(mad_send_wr);
2614
2615 if (!ret) {
2616 mad_send_wr->refcount++;
2617 list_add_tail(&mad_send_wr->agent_list,
2618 &mad_send_wr->mad_agent_priv->send_list);
2619 }
2620 return ret;
2621 }
2622
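/*
 * Delayed work that walks the wait list, retries requests whose timeout
 * has expired and reports IB_WC_RESP_TIMEOUT_ERR for those that are out
 * of retries, rescheduling itself for the next pending timeout.
 */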
2623 static void timeout_sends(struct work_struct *work)
2624 {
2625 struct ib_mad_agent_private *mad_agent_priv;
2626 struct ib_mad_send_wr_private *mad_send_wr;
2627 struct ib_mad_send_wc mad_send_wc;
2628 unsigned long flags, delay;
2629
2630 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2631 timed_work.work);
2632 mad_send_wc.vendor_err = 0;
2633
2634 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2635 while (!list_empty(&mad_agent_priv->wait_list)) {
2636 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2637 struct ib_mad_send_wr_private,
2638 agent_list);
2639
2640 if (time_after(mad_send_wr->timeout, jiffies)) {
2641 delay = mad_send_wr->timeout - jiffies;
2642 if ((long)delay <= 0)
2643 delay = 1;
2644 queue_delayed_work(mad_agent_priv->qp_info->
2645 port_priv->wq,
2646 &mad_agent_priv->timed_work, delay);
2647 break;
2648 }
2649
2650 list_del(&mad_send_wr->agent_list);
2651 if (mad_send_wr->status == IB_WC_SUCCESS &&
2652 !retry_send(mad_send_wr))
2653 continue;
2654
2655 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2656
2657 if (mad_send_wr->status == IB_WC_SUCCESS)
2658 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2659 else
2660 mad_send_wc.status = mad_send_wr->status;
2661 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2662 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2663 &mad_send_wc);
2664
2665 atomic_dec(&mad_agent_priv->refcount);
2666 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2667 }
2668 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2669 }
2670
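/*
 * CQ event callback: defer completion processing to the port workqueue,
 * but only while the port is still on the global port list.
 */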
2671 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2672 {
2673 struct ib_mad_port_private *port_priv = cq->cq_context;
2674 unsigned long flags;
2675
2676 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2677 if (!list_empty(&port_priv->port_list))
2678 queue_work(port_priv->wq, &port_priv->work);
2679 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2680 }
2681
2682 /*
2683 * Allocate receive MADs and post receive WRs for them
2684 */
2685 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2686 struct ib_mad_private *mad)
2687 {
2688 unsigned long flags;
2689 int post, ret;
2690 struct ib_mad_private *mad_priv;
2691 struct ib_sge sg_list;
2692 struct ib_recv_wr recv_wr, *bad_recv_wr;
2693 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2694
2695 /* Initialize common scatter list fields */
2696 sg_list.lkey = qp_info->port_priv->mr->lkey;
2697
2698 /* Initialize common receive WR fields */
2699 recv_wr.next = NULL;
2700 recv_wr.sg_list = &sg_list;
2701 recv_wr.num_sge = 1;
2702
2703 do {
2704 /* Allocate and map receive buffer */
2705 if (mad) {
2706 mad_priv = mad;
2707 mad = NULL;
2708 } else {
2709 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2710 GFP_ATOMIC);
2711 if (!mad_priv) {
2712 dev_err(&qp_info->port_priv->device->dev,
2713 "No memory for receive buffer\n");
2714 ret = -ENOMEM;
2715 break;
2716 }
2717 }
2718 sg_list.length = mad_priv_dma_size(mad_priv);
2719 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2720 &mad_priv->grh,
2721 mad_priv_dma_size(mad_priv),
2722 DMA_FROM_DEVICE);
2723 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2724 sg_list.addr))) {
2725 ret = -ENOMEM;
2726 break;
2727 }
2728 mad_priv->header.mapping = sg_list.addr;
2729 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2730 mad_priv->header.mad_list.mad_queue = recv_queue;
2731
2732 /* Post receive WR */
2733 spin_lock_irqsave(&recv_queue->lock, flags);
2734 post = (++recv_queue->count < recv_queue->max_active);
2735 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2736 spin_unlock_irqrestore(&recv_queue->lock, flags);
2737 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2738 if (ret) {
2739 spin_lock_irqsave(&recv_queue->lock, flags);
2740 list_del(&mad_priv->header.mad_list.list);
2741 recv_queue->count--;
2742 spin_unlock_irqrestore(&recv_queue->lock, flags);
2743 ib_dma_unmap_single(qp_info->port_priv->device,
2744 mad_priv->header.mapping,
2745 mad_priv_dma_size(mad_priv),
2746 DMA_FROM_DEVICE);
2747 kfree(mad_priv);
2748 dev_err(&qp_info->port_priv->device->dev,
2749 "ib_post_recv failed: %d\n", ret);
2750 break;
2751 }
2752 } while (post);
2753
2754 return ret;
2755 }
2756
2757 /*
2758 * Return all the posted receive MADs
2759 */
2760 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2761 {
2762 struct ib_mad_private_header *mad_priv_hdr;
2763 struct ib_mad_private *recv;
2764 struct ib_mad_list_head *mad_list;
2765
2766 if (!qp_info->qp)
2767 return;
2768
2769 while (!list_empty(&qp_info->recv_queue.list)) {
2770
2771 mad_list = list_entry(qp_info->recv_queue.list.next,
2772 struct ib_mad_list_head, list);
2773 mad_priv_hdr = container_of(mad_list,
2774 struct ib_mad_private_header,
2775 mad_list);
2776 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2777 header);
2778
2779 /* Remove from posted receive MAD list */
2780 list_del(&mad_list->list);
2781
2782 ib_dma_unmap_single(qp_info->port_priv->device,
2783 recv->header.mapping,
2784 mad_priv_dma_size(recv),
2785 DMA_FROM_DEVICE);
2786 kfree(recv);
2787 }
2788
2789 qp_info->recv_queue.count = 0;
2790 }
2791
2792 /*
2793 * Start the port
2794 */
2795 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2796 {
2797 int ret, i;
2798 struct ib_qp_attr *attr;
2799 struct ib_qp *qp;
2800 u16 pkey_index;
2801
2802 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2803 if (!attr) {
2804 dev_err(&port_priv->device->dev,
2805 "Couldn't kmalloc ib_qp_attr\n");
2806 return -ENOMEM;
2807 }
2808
2809 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2810 IB_DEFAULT_PKEY_FULL, &pkey_index);
2811 if (ret)
2812 pkey_index = 0;
2813
2814 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2815 qp = port_priv->qp_info[i].qp;
2816 if (!qp)
2817 continue;
2818
2819 /*
2820 * PKey index for QP1 is irrelevant but
2821 * one is needed for the Reset to Init transition
2822 */
2823 attr->qp_state = IB_QPS_INIT;
2824 attr->pkey_index = pkey_index;
2825 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2826 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2827 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2828 if (ret) {
2829 dev_err(&port_priv->device->dev,
2830 "Couldn't change QP%d state to INIT: %d\n",
2831 i, ret);
2832 goto out;
2833 }
2834
2835 attr->qp_state = IB_QPS_RTR;
2836 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2837 if (ret) {
2838 dev_err(&port_priv->device->dev,
2839 "Couldn't change QP%d state to RTR: %d\n",
2840 i, ret);
2841 goto out;
2842 }
2843
2844 attr->qp_state = IB_QPS_RTS;
2845 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2846 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2847 if (ret) {
2848 dev_err(&port_priv->device->dev,
2849 "Couldn't change QP%d state to RTS: %d\n",
2850 i, ret);
2851 goto out;
2852 }
2853 }
2854
2855 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2856 if (ret) {
2857 dev_err(&port_priv->device->dev,
2858 "Failed to request completion notification: %d\n",
2859 ret);
2860 goto out;
2861 }
2862
2863 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2864 if (!port_priv->qp_info[i].qp)
2865 continue;
2866
2867 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2868 if (ret) {
2869 dev_err(&port_priv->device->dev,
2870 "Couldn't post receive WRs\n");
2871 goto out;
2872 }
2873 }
2874 out:
2875 kfree(attr);
2876 return ret;
2877 }
2878
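/*
 * Async event handler for a MAD QP; fatal errors are only logged here.
 */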
2879 static void qp_event_handler(struct ib_event *event, void *qp_context)
2880 {
2881 struct ib_mad_qp_info *qp_info = qp_context;
2882
2883 /* It's worse than that! He's dead, Jim! */
2884 dev_err(&qp_info->port_priv->device->dev,
2885 "Fatal error (%d) on MAD QP (%d)\n",
2886 event->event, qp_info->qp->qp_num);
2887 }
2888
2889 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2890 struct ib_mad_queue *mad_queue)
2891 {
2892 mad_queue->qp_info = qp_info;
2893 mad_queue->count = 0;
2894 spin_lock_init(&mad_queue->lock);
2895 INIT_LIST_HEAD(&mad_queue->list);
2896 }
2897
2898 static void init_mad_qp(struct ib_mad_port_private *port_priv,
2899 struct ib_mad_qp_info *qp_info)
2900 {
2901 qp_info->port_priv = port_priv;
2902 init_mad_queue(qp_info, &qp_info->send_queue);
2903 init_mad_queue(qp_info, &qp_info->recv_queue);
2904 INIT_LIST_HEAD(&qp_info->overflow_list);
2905 spin_lock_init(&qp_info->snoop_lock);
2906 qp_info->snoop_table = NULL;
2907 qp_info->snoop_table_size = 0;
2908 atomic_set(&qp_info->snoop_count, 0);
2909 }
2910
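/*
 * Create the SMI or GSI QP for a port, sharing the port's CQ for both
 * send and receive completions.
 */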
2911 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2912 enum ib_qp_type qp_type)
2913 {
2914 struct ib_qp_init_attr qp_init_attr;
2915 int ret;
2916
2917 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2918 qp_init_attr.send_cq = qp_info->port_priv->cq;
2919 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2920 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2921 qp_init_attr.cap.max_send_wr = mad_sendq_size;
2922 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
2923 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2924 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2925 qp_init_attr.qp_type = qp_type;
2926 qp_init_attr.port_num = qp_info->port_priv->port_num;
2927 qp_init_attr.qp_context = qp_info;
2928 qp_init_attr.event_handler = qp_event_handler;
2929 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2930 if (IS_ERR(qp_info->qp)) {
2931 dev_err(&qp_info->port_priv->device->dev,
2932 "Couldn't create ib_mad QP%d\n",
2933 get_spl_qp_index(qp_type));
2934 ret = PTR_ERR(qp_info->qp);
2935 goto error;
2936 }
2937 /* Use minimum queue sizes unless the CQ is resized */
2938 qp_info->send_queue.max_active = mad_sendq_size;
2939 qp_info->recv_queue.max_active = mad_recvq_size;
2940 return 0;
2941
2942 error:
2943 return ret;
2944 }
2945
2946 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2947 {
2948 if (!qp_info->qp)
2949 return;
2950
2951 ib_destroy_qp(qp_info->qp);
2952 kfree(qp_info->snoop_table);
2953 }
2954
2955 /*
2956 * Open the port
2957 * Create the QP, PD, MR, and CQ if needed
2958 */
2959 static int ib_mad_port_open(struct ib_device *device,
2960 int port_num)
2961 {
2962 int ret, cq_size;
2963 struct ib_mad_port_private *port_priv;
2964 unsigned long flags;
2965 char name[sizeof "ib_mad123"];
2966 int has_smi;
2967 struct ib_cq_init_attr cq_attr = {};
2968
2969 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
2970 return -EFAULT;
2971
2972 /* Create new device info */
2973 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2974 if (!port_priv) {
2975 dev_err(&device->dev, "No memory for ib_mad_port_private\n");
2976 return -ENOMEM;
2977 }
2978
2979 port_priv->device = device;
2980 port_priv->port_num = port_num;
2981 spin_lock_init(&port_priv->reg_lock);
2982 INIT_LIST_HEAD(&port_priv->agent_list);
2983 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2984 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2985
2986 cq_size = mad_sendq_size + mad_recvq_size;
2987 has_smi = rdma_cap_ib_smi(device, port_num);
2988 if (has_smi)
2989 cq_size *= 2;
2990
2991 cq_attr.cqe = cq_size;
2992 port_priv->cq = ib_create_cq(port_priv->device,
2993 ib_mad_thread_completion_handler,
2994 NULL, port_priv, &cq_attr);
2995 if (IS_ERR(port_priv->cq)) {
2996 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
2997 ret = PTR_ERR(port_priv->cq);
2998 goto error3;
2999 }
3000
3001 port_priv->pd = ib_alloc_pd(device);
3002 if (IS_ERR(port_priv->pd)) {
3003 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3004 ret = PTR_ERR(port_priv->pd);
3005 goto error4;
3006 }
3007
3008 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
3009 if (IS_ERR(port_priv->mr)) {
3010 dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
3011 ret = PTR_ERR(port_priv->mr);
3012 goto error5;
3013 }
3014
3015 if (has_smi) {
3016 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3017 if (ret)
3018 goto error6;
3019 }
3020 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3021 if (ret)
3022 goto error7;
3023
3024 snprintf(name, sizeof name, "ib_mad%d", port_num);
3025 port_priv->wq = create_singlethread_workqueue(name);
3026 if (!port_priv->wq) {
3027 ret = -ENOMEM;
3028 goto error8;
3029 }
3030 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
3031
3032 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3033 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3034 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3035
3036 ret = ib_mad_port_start(port_priv);
3037 if (ret) {
3038 dev_err(&device->dev, "Couldn't start port\n");
3039 goto error9;
3040 }
3041
3042 return 0;
3043
3044 error9:
3045 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3046 list_del_init(&port_priv->port_list);
3047 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3048
3049 destroy_workqueue(port_priv->wq);
3050 error8:
3051 destroy_mad_qp(&port_priv->qp_info[1]);
3052 error7:
3053 destroy_mad_qp(&port_priv->qp_info[0]);
3054 error6:
3055 ib_dereg_mr(port_priv->mr);
3056 error5:
3057 ib_dealloc_pd(port_priv->pd);
3058 error4:
3059 ib_destroy_cq(port_priv->cq);
3060 cleanup_recv_queue(&port_priv->qp_info[1]);
3061 cleanup_recv_queue(&port_priv->qp_info[0]);
3062 error3:
3063 kfree(port_priv);
3064
3065 return ret;
3066 }
3067
3068 /*
3069 * Close the port
3070 * If there are no classes using the port, free the port
3071 * resources (CQ, MR, PD, QP) and remove the port's info structure
3072 */
3073 static int ib_mad_port_close(struct ib_device *device, int port_num)
3074 {
3075 struct ib_mad_port_private *port_priv;
3076 unsigned long flags;
3077
3078 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3079 port_priv = __ib_get_mad_port(device, port_num);
3080 if (port_priv == NULL) {
3081 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3082 dev_err(&device->dev, "Port %d not found\n", port_num);
3083 return -ENODEV;
3084 }
3085 list_del_init(&port_priv->port_list);
3086 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3087
3088 destroy_workqueue(port_priv->wq);
3089 destroy_mad_qp(&port_priv->qp_info[1]);
3090 destroy_mad_qp(&port_priv->qp_info[0]);
3091 ib_dereg_mr(port_priv->mr);
3092 ib_dealloc_pd(port_priv->pd);
3093 ib_destroy_cq(port_priv->cq);
3094 cleanup_recv_queue(&port_priv->qp_info[1]);
3095 cleanup_recv_queue(&port_priv->qp_info[0]);
3096 /* XXX: Handle deallocation of MAD registration tables */
3097
3098 kfree(port_priv);
3099
3100 return 0;
3101 }
3102
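/*
 * Client "add" callback: open a MAD port and an agent port for every
 * port of the device that supports MADs (switches only use port 0).
 * On failure, any ports opened so far are closed again.
 */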
3103 static void ib_mad_init_device(struct ib_device *device)
3104 {
3105 int start, end, i;
3106
3107 if (device->node_type == RDMA_NODE_IB_SWITCH) {
3108 start = 0;
3109 end = 0;
3110 } else {
3111 start = 1;
3112 end = device->phys_port_cnt;
3113 }
3114
3115 for (i = start; i <= end; i++) {
3116 if (!rdma_cap_ib_mad(device, i))
3117 continue;
3118
3119 if (ib_mad_port_open(device, i)) {
3120 dev_err(&device->dev, "Couldn't open port %d\n", i);
3121 goto error;
3122 }
3123 if (ib_agent_port_open(device, i)) {
3124 dev_err(&device->dev,
3125 "Couldn't open port %d for agents\n", i);
3126 goto error_agent;
3127 }
3128 }
3129 return;
3130
3131 error_agent:
3132 if (ib_mad_port_close(device, i))
3133 dev_err(&device->dev, "Couldn't close port %d\n", i);
3134
3135 error:
3136 while (--i >= start) {
3137 if (!rdma_cap_ib_mad(device, i))
3138 continue;
3139
3140 if (ib_agent_port_close(device, i))
3141 dev_err(&device->dev,
3142 "Couldn't close port %d for agents\n", i);
3143 if (ib_mad_port_close(device, i))
3144 dev_err(&device->dev, "Couldn't close port %d\n", i);
3145 }
3146 }
3147
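/*
 * Client "remove" callback: close the agent and MAD ports opened in
 * ib_mad_init_device().
 */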
3148 static void ib_mad_remove_device(struct ib_device *device)
3149 {
3150 int start, end, i;
3151
3152 if (device->node_type == RDMA_NODE_IB_SWITCH) {
3153 start = 0;
3154 end = 0;
3155 } else {
3156 start = 1;
3157 end = device->phys_port_cnt;
3158 }
3159
3160 for (i = start; i <= end; i++) {
3161 if (!rdma_cap_ib_mad(device, i))
3162 continue;
3163
3164 if (ib_agent_port_close(device, i))
3165 dev_err(&device->dev,
3166 "Couldn't close port %d for agents\n", i);
3167 if (ib_mad_port_close(device, i))
3168 dev_err(&device->dev, "Couldn't close port %d\n", i);
3169 }
3170 }
3171
3172 static struct ib_client mad_client = {
3173 .name = "mad",
3174 .add = ib_mad_init_device,
3175 .remove = ib_mad_remove_device
3176 };
3177
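/*
 * Module init: clamp the queue size parameters to their supported range
 * and register with the IB core as the "mad" client.
 */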
3178 static int __init ib_mad_init_module(void)
3179 {
3180 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3181 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3182
3183 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3184 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3185
3186 INIT_LIST_HEAD(&ib_mad_port_list);
3187
3188 if (ib_register_client(&mad_client)) {
3189 pr_err("Couldn't register ib_mad client\n");
3190 return -EINVAL;
3191 }
3192
3193 return 0;
3194 }
3195
3196 static void __exit ib_mad_cleanup_module(void)
3197 {
3198 ib_unregister_client(&mad_client);
3199 }
3200
3201 module_init(ib_mad_init_module);
3202 module_exit(ib_mad_cleanup_module);