drivers/infiniband/core/mad.c (Ubuntu-4.15.0-96.97, mirror_ubuntu-bionic-kernel.git)
1 /*
2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
6 * Copyright (c) 2014 Intel Corporation. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 *
36 */
37
38 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
39
40 #include <linux/dma-mapping.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/security.h>
44 #include <rdma/ib_cache.h>
45
46 #include "mad_priv.h"
47 #include "core_priv.h"
48 #include "mad_rmpp.h"
49 #include "smi.h"
50 #include "opa_smi.h"
51 #include "agent.h"
53
54 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
55 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
56
57 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
58 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
59 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
60 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
61
62 static struct list_head ib_mad_port_list;
63 static atomic_t ib_mad_client_id = ATOMIC_INIT(0);
64
65 /* Port list lock */
66 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
67
68 /* Forward declarations */
69 static int method_in_use(struct ib_mad_mgmt_method_table **method,
70 struct ib_mad_reg_req *mad_reg_req);
71 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
72 static struct ib_mad_agent_private *find_mad_agent(
73 struct ib_mad_port_private *port_priv,
74 const struct ib_mad_hdr *mad);
75 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
76 struct ib_mad_private *mad);
77 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
78 static void timeout_sends(struct work_struct *work);
79 static void local_completions(struct work_struct *work);
80 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
81 struct ib_mad_agent_private *agent_priv,
82 u8 mgmt_class);
83 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
84 struct ib_mad_agent_private *agent_priv);
85 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
86 struct ib_wc *wc);
87 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
88
89 /*
90  * Returns an ib_mad_port_private structure or NULL for a device/port
91 * Assumes ib_mad_port_list_lock is being held
92 */
93 static inline struct ib_mad_port_private *
94 __ib_get_mad_port(struct ib_device *device, int port_num)
95 {
96 struct ib_mad_port_private *entry;
97
98 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
99 if (entry->device == device && entry->port_num == port_num)
100 return entry;
101 }
102 return NULL;
103 }
104
105 /*
106  * Wrapper function to return an ib_mad_port_private structure or NULL
107 * for a device/port
108 */
109 static inline struct ib_mad_port_private *
110 ib_get_mad_port(struct ib_device *device, int port_num)
111 {
112 struct ib_mad_port_private *entry;
113 unsigned long flags;
114
115 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
116 entry = __ib_get_mad_port(device, port_num);
117 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
118
119 return entry;
120 }
121
122 static inline u8 convert_mgmt_class(u8 mgmt_class)
123 {
124 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
125 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
126 0 : mgmt_class;
127 }
128
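/* Map a special QP type to its index: 0 for SMI (QP0), 1 for GSI (QP1), -1 otherwise */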
129 static int get_spl_qp_index(enum ib_qp_type qp_type)
130 {
131 switch (qp_type)
132 {
133 case IB_QPT_SMI:
134 return 0;
135 case IB_QPT_GSI:
136 return 1;
137 default:
138 return -1;
139 }
140 }
141
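/* Index of a vendor range 2 class within the vendor class table */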
142 static int vendor_class_index(u8 mgmt_class)
143 {
144 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
145 }
146
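/* Is the management class in the second vendor range (vendor classes that carry an OUI)? */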
147 static int is_vendor_class(u8 mgmt_class)
148 {
149 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
150 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
151 return 0;
152 return 1;
153 }
154
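/* A vendor OUI is considered valid only if it is non-zero */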
155 static int is_vendor_oui(char *oui)
156 {
157 if (oui[0] || oui[1] || oui[2])
158 return 1;
159 return 0;
160 }
161
162 static int is_vendor_method_in_use(
163 struct ib_mad_mgmt_vendor_class *vendor_class,
164 struct ib_mad_reg_req *mad_reg_req)
165 {
166 struct ib_mad_mgmt_method_table *method;
167 int i;
168
169 for (i = 0; i < MAX_MGMT_OUI; i++) {
170 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
171 method = vendor_class->method_table[i];
172 if (method) {
173 if (method_in_use(&method, mad_reg_req))
174 return 1;
175 else
176 break;
177 }
178 }
179 }
180 return 0;
181 }
182
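/*
 * A MAD is a response if the response bit is set in the method, it is a
 * TrapRepress, or it is a BM MAD with the response bit set in attr_mod.
 */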
183 int ib_response_mad(const struct ib_mad_hdr *hdr)
184 {
185 return ((hdr->method & IB_MGMT_METHOD_RESP) ||
186 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
187 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
188 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
189 }
190 EXPORT_SYMBOL(ib_response_mad);
191
192 /*
193 * ib_register_mad_agent - Register to send/receive MADs
194 */
195 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
196 u8 port_num,
197 enum ib_qp_type qp_type,
198 struct ib_mad_reg_req *mad_reg_req,
199 u8 rmpp_version,
200 ib_mad_send_handler send_handler,
201 ib_mad_recv_handler recv_handler,
202 void *context,
203 u32 registration_flags)
204 {
205 struct ib_mad_port_private *port_priv;
206 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
207 struct ib_mad_agent_private *mad_agent_priv;
208 struct ib_mad_reg_req *reg_req = NULL;
209 struct ib_mad_mgmt_class_table *class;
210 struct ib_mad_mgmt_vendor_class_table *vendor;
211 struct ib_mad_mgmt_vendor_class *vendor_class;
212 struct ib_mad_mgmt_method_table *method;
213 int ret2, qpn;
214 unsigned long flags;
215 u8 mgmt_class, vclass;
216
217 /* Validate parameters */
218 qpn = get_spl_qp_index(qp_type);
219 if (qpn == -1) {
220 dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
221 __func__, qp_type);
222 goto error1;
223 }
224
225 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
226 dev_dbg_ratelimited(&device->dev,
227 "%s: invalid RMPP Version %u\n",
228 __func__, rmpp_version);
229 goto error1;
230 }
231
232 /* Validate MAD registration request if supplied */
233 if (mad_reg_req) {
234 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
235 dev_dbg_ratelimited(&device->dev,
236 "%s: invalid Class Version %u\n",
237 __func__,
238 mad_reg_req->mgmt_class_version);
239 goto error1;
240 }
241 if (!recv_handler) {
242 dev_dbg_ratelimited(&device->dev,
243 "%s: no recv_handler\n", __func__);
244 goto error1;
245 }
246 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
247 /*
248 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
249 * one in this range currently allowed
250 */
251 if (mad_reg_req->mgmt_class !=
252 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
253 dev_dbg_ratelimited(&device->dev,
254 "%s: Invalid Mgmt Class 0x%x\n",
255 __func__, mad_reg_req->mgmt_class);
256 goto error1;
257 }
258 } else if (mad_reg_req->mgmt_class == 0) {
259 /*
260 * Class 0 is reserved in IBA and is used for
261 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
262 */
263 dev_dbg_ratelimited(&device->dev,
264 "%s: Invalid Mgmt Class 0\n",
265 __func__);
266 goto error1;
267 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
268 /*
269 * If class is in "new" vendor range,
270 * ensure supplied OUI is not zero
271 */
272 if (!is_vendor_oui(mad_reg_req->oui)) {
273 dev_dbg_ratelimited(&device->dev,
274 "%s: No OUI specified for class 0x%x\n",
275 __func__,
276 mad_reg_req->mgmt_class);
277 goto error1;
278 }
279 }
280 /* Make sure class supplied is consistent with RMPP */
281 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
282 if (rmpp_version) {
283 dev_dbg_ratelimited(&device->dev,
284 "%s: RMPP version for non-RMPP class 0x%x\n",
285 __func__, mad_reg_req->mgmt_class);
286 goto error1;
287 }
288 }
289
290 /* Make sure class supplied is consistent with QP type */
291 if (qp_type == IB_QPT_SMI) {
292 if ((mad_reg_req->mgmt_class !=
293 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
294 (mad_reg_req->mgmt_class !=
295 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
296 dev_dbg_ratelimited(&device->dev,
297 "%s: Invalid SM QP type: class 0x%x\n",
298 __func__, mad_reg_req->mgmt_class);
299 goto error1;
300 }
301 } else {
302 if ((mad_reg_req->mgmt_class ==
303 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
304 (mad_reg_req->mgmt_class ==
305 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
306 dev_dbg_ratelimited(&device->dev,
307 "%s: Invalid GS QP type: class 0x%x\n",
308 __func__, mad_reg_req->mgmt_class);
309 goto error1;
310 }
311 }
312 } else {
313 /* No registration request supplied */
314 if (!send_handler)
315 goto error1;
316 if (registration_flags & IB_MAD_USER_RMPP)
317 goto error1;
318 }
319
320 /* Validate device and port */
321 port_priv = ib_get_mad_port(device, port_num);
322 if (!port_priv) {
323 dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n",
324 __func__, port_num);
325 ret = ERR_PTR(-ENODEV);
326 goto error1;
327 }
328
329 /* Verify the QP requested is supported. For example, Ethernet devices
330 * will not have QP0.
331 */
332 if (!port_priv->qp_info[qpn].qp) {
333 dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
334 __func__, qpn);
335 ret = ERR_PTR(-EPROTONOSUPPORT);
336 goto error1;
337 }
338
339 /* Allocate structures */
340 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
341 if (!mad_agent_priv) {
342 ret = ERR_PTR(-ENOMEM);
343 goto error1;
344 }
345
346 if (mad_reg_req) {
347 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
348 if (!reg_req) {
349 ret = ERR_PTR(-ENOMEM);
350 goto error3;
351 }
352 }
353
354 /* Now, fill in the various structures */
355 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
356 mad_agent_priv->reg_req = reg_req;
357 mad_agent_priv->agent.rmpp_version = rmpp_version;
358 mad_agent_priv->agent.device = device;
359 mad_agent_priv->agent.recv_handler = recv_handler;
360 mad_agent_priv->agent.send_handler = send_handler;
361 mad_agent_priv->agent.context = context;
362 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
363 mad_agent_priv->agent.port_num = port_num;
364 mad_agent_priv->agent.flags = registration_flags;
365 spin_lock_init(&mad_agent_priv->lock);
366 INIT_LIST_HEAD(&mad_agent_priv->send_list);
367 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
368 INIT_LIST_HEAD(&mad_agent_priv->done_list);
369 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
370 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
371 INIT_LIST_HEAD(&mad_agent_priv->local_list);
372 INIT_WORK(&mad_agent_priv->local_work, local_completions);
373 atomic_set(&mad_agent_priv->refcount, 1);
374 init_completion(&mad_agent_priv->comp);
375
376 ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
377 if (ret2) {
378 ret = ERR_PTR(ret2);
379 goto error4;
380 }
381
382 spin_lock_irqsave(&port_priv->reg_lock, flags);
383 mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);
384
385 /*
386 * Make sure MAD registration (if supplied)
387  * is non-overlapping with any existing ones
388 */
389 if (mad_reg_req) {
390 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
391 if (!is_vendor_class(mgmt_class)) {
392 class = port_priv->version[mad_reg_req->
393 mgmt_class_version].class;
394 if (class) {
395 method = class->method_table[mgmt_class];
396 if (method) {
397 if (method_in_use(&method,
398 mad_reg_req))
399 goto error5;
400 }
401 }
402 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
403 mgmt_class);
404 } else {
405 /* "New" vendor class range */
406 vendor = port_priv->version[mad_reg_req->
407 mgmt_class_version].vendor;
408 if (vendor) {
409 vclass = vendor_class_index(mgmt_class);
410 vendor_class = vendor->vendor_class[vclass];
411 if (vendor_class) {
412 if (is_vendor_method_in_use(
413 vendor_class,
414 mad_reg_req))
415 goto error5;
416 }
417 }
418 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
419 }
420 if (ret2) {
421 ret = ERR_PTR(ret2);
422 goto error5;
423 }
424 }
425
426 /* Add mad agent into port's agent list */
427 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
428 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
429
430 return &mad_agent_priv->agent;
431 error5:
432 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
433 ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
434 error4:
435 kfree(reg_req);
436 error3:
437 kfree(mad_agent_priv);
438 error1:
439 return ret;
440 }
441 EXPORT_SYMBOL(ib_register_mad_agent);
442
443 static inline int is_snooping_sends(int mad_snoop_flags)
444 {
445 return (mad_snoop_flags &
446 (/*IB_MAD_SNOOP_POSTED_SENDS |
447 IB_MAD_SNOOP_RMPP_SENDS |*/
448 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
449 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
450 }
451
452 static inline int is_snooping_recvs(int mad_snoop_flags)
453 {
454 return (mad_snoop_flags &
455 (IB_MAD_SNOOP_RECVS /*|
456 IB_MAD_SNOOP_RMPP_RECVS*/));
457 }
458
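/*
 * Install a snoop agent in the first free slot of the QP's snoop table,
 * growing the table if needed.  Returns the slot index or -ENOMEM.
 */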
459 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
460 struct ib_mad_snoop_private *mad_snoop_priv)
461 {
462 struct ib_mad_snoop_private **new_snoop_table;
463 unsigned long flags;
464 int i;
465
466 spin_lock_irqsave(&qp_info->snoop_lock, flags);
467 /* Check for empty slot in array. */
468 for (i = 0; i < qp_info->snoop_table_size; i++)
469 if (!qp_info->snoop_table[i])
470 break;
471
472 if (i == qp_info->snoop_table_size) {
473 /* Grow table. */
474 new_snoop_table = krealloc(qp_info->snoop_table,
475 sizeof mad_snoop_priv *
476 (qp_info->snoop_table_size + 1),
477 GFP_ATOMIC);
478 if (!new_snoop_table) {
479 i = -ENOMEM;
480 goto out;
481 }
482
483 qp_info->snoop_table = new_snoop_table;
484 qp_info->snoop_table_size++;
485 }
486 qp_info->snoop_table[i] = mad_snoop_priv;
487 atomic_inc(&qp_info->snoop_count);
488 out:
489 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
490 return i;
491 }
492
493 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
494 u8 port_num,
495 enum ib_qp_type qp_type,
496 int mad_snoop_flags,
497 ib_mad_snoop_handler snoop_handler,
498 ib_mad_recv_handler recv_handler,
499 void *context)
500 {
501 struct ib_mad_port_private *port_priv;
502 struct ib_mad_agent *ret;
503 struct ib_mad_snoop_private *mad_snoop_priv;
504 int qpn;
505 int err;
506
507 /* Validate parameters */
508 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
509 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
510 ret = ERR_PTR(-EINVAL);
511 goto error1;
512 }
513 qpn = get_spl_qp_index(qp_type);
514 if (qpn == -1) {
515 ret = ERR_PTR(-EINVAL);
516 goto error1;
517 }
518 port_priv = ib_get_mad_port(device, port_num);
519 if (!port_priv) {
520 ret = ERR_PTR(-ENODEV);
521 goto error1;
522 }
523 /* Allocate structures */
524 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
525 if (!mad_snoop_priv) {
526 ret = ERR_PTR(-ENOMEM);
527 goto error1;
528 }
529
530 /* Now, fill in the various structures */
531 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
532 mad_snoop_priv->agent.device = device;
533 mad_snoop_priv->agent.recv_handler = recv_handler;
534 mad_snoop_priv->agent.snoop_handler = snoop_handler;
535 mad_snoop_priv->agent.context = context;
536 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
537 mad_snoop_priv->agent.port_num = port_num;
538 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
539 init_completion(&mad_snoop_priv->comp);
540
541 err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
542 if (err) {
543 ret = ERR_PTR(err);
544 goto error2;
545 }
546
547 mad_snoop_priv->snoop_index = register_snoop_agent(
548 &port_priv->qp_info[qpn],
549 mad_snoop_priv);
550 if (mad_snoop_priv->snoop_index < 0) {
551 ret = ERR_PTR(mad_snoop_priv->snoop_index);
552 goto error3;
553 }
554
555 atomic_set(&mad_snoop_priv->refcount, 1);
556 return &mad_snoop_priv->agent;
557 error3:
558 ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
559 error2:
560 kfree(mad_snoop_priv);
561 error1:
562 return ret;
563 }
564 EXPORT_SYMBOL(ib_register_mad_snoop);
565
566 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
567 {
568 if (atomic_dec_and_test(&mad_agent_priv->refcount))
569 complete(&mad_agent_priv->comp);
570 }
571
572 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
573 {
574 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
575 complete(&mad_snoop_priv->comp);
576 }
577
578 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
579 {
580 struct ib_mad_port_private *port_priv;
581 unsigned long flags;
582
583 /* Note that we could still be handling received MADs */
584
585 /*
586 * Canceling all sends results in dropping received response
587 * MADs, preventing us from queuing additional work
588 */
589 cancel_mads(mad_agent_priv);
590 port_priv = mad_agent_priv->qp_info->port_priv;
591 cancel_delayed_work(&mad_agent_priv->timed_work);
592
593 spin_lock_irqsave(&port_priv->reg_lock, flags);
594 remove_mad_reg_req(mad_agent_priv);
595 list_del(&mad_agent_priv->agent_list);
596 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
597
598 flush_workqueue(port_priv->wq);
599 ib_cancel_rmpp_recvs(mad_agent_priv);
600
601 deref_mad_agent(mad_agent_priv);
602 wait_for_completion(&mad_agent_priv->comp);
603
604 ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
605
606 kfree(mad_agent_priv->reg_req);
607 kfree(mad_agent_priv);
608 }
609
610 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
611 {
612 struct ib_mad_qp_info *qp_info;
613 unsigned long flags;
614
615 qp_info = mad_snoop_priv->qp_info;
616 spin_lock_irqsave(&qp_info->snoop_lock, flags);
617 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
618 atomic_dec(&qp_info->snoop_count);
619 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
620
621 deref_snoop_agent(mad_snoop_priv);
622 wait_for_completion(&mad_snoop_priv->comp);
623
624 ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
625
626 kfree(mad_snoop_priv);
627 }
628
629 /*
630 * ib_unregister_mad_agent - Unregisters a client from using MAD services
631 */
632 void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
633 {
634 struct ib_mad_agent_private *mad_agent_priv;
635 struct ib_mad_snoop_private *mad_snoop_priv;
636
637 /* If the TID is zero, the agent can only snoop. */
638 if (mad_agent->hi_tid) {
639 mad_agent_priv = container_of(mad_agent,
640 struct ib_mad_agent_private,
641 agent);
642 unregister_mad_agent(mad_agent_priv);
643 } else {
644 mad_snoop_priv = container_of(mad_agent,
645 struct ib_mad_snoop_private,
646 agent);
647 unregister_mad_snoop(mad_snoop_priv);
648 }
649 }
650 EXPORT_SYMBOL(ib_unregister_mad_agent);
651
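/* Remove a completed work request from its MAD queue */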
652 static void dequeue_mad(struct ib_mad_list_head *mad_list)
653 {
654 struct ib_mad_queue *mad_queue;
655 unsigned long flags;
656
657 BUG_ON(!mad_list->mad_queue);
658 mad_queue = mad_list->mad_queue;
659 spin_lock_irqsave(&mad_queue->lock, flags);
660 list_del(&mad_list->list);
661 mad_queue->count--;
662 spin_unlock_irqrestore(&mad_queue->lock, flags);
663 }
664
665 static void snoop_send(struct ib_mad_qp_info *qp_info,
666 struct ib_mad_send_buf *send_buf,
667 struct ib_mad_send_wc *mad_send_wc,
668 int mad_snoop_flags)
669 {
670 struct ib_mad_snoop_private *mad_snoop_priv;
671 unsigned long flags;
672 int i;
673
674 spin_lock_irqsave(&qp_info->snoop_lock, flags);
675 for (i = 0; i < qp_info->snoop_table_size; i++) {
676 mad_snoop_priv = qp_info->snoop_table[i];
677 if (!mad_snoop_priv ||
678 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
679 continue;
680
681 atomic_inc(&mad_snoop_priv->refcount);
682 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
683 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
684 send_buf, mad_send_wc);
685 deref_snoop_agent(mad_snoop_priv);
686 spin_lock_irqsave(&qp_info->snoop_lock, flags);
687 }
688 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
689 }
690
691 static void snoop_recv(struct ib_mad_qp_info *qp_info,
692 struct ib_mad_recv_wc *mad_recv_wc,
693 int mad_snoop_flags)
694 {
695 struct ib_mad_snoop_private *mad_snoop_priv;
696 unsigned long flags;
697 int i;
698
699 spin_lock_irqsave(&qp_info->snoop_lock, flags);
700 for (i = 0; i < qp_info->snoop_table_size; i++) {
701 mad_snoop_priv = qp_info->snoop_table[i];
702 if (!mad_snoop_priv ||
703 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
704 continue;
705
706 atomic_inc(&mad_snoop_priv->refcount);
707 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
708 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
709 mad_recv_wc);
710 deref_snoop_agent(mad_snoop_priv);
711 spin_lock_irqsave(&qp_info->snoop_lock, flags);
712 }
713 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
714 }
715
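/*
 * Build a synthetic receive work completion for an SMP that is processed
 * locally instead of being posted to the QP.
 */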
716 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
717 u16 pkey_index, u8 port_num, struct ib_wc *wc)
718 {
719 memset(wc, 0, sizeof *wc);
720 wc->wr_cqe = cqe;
721 wc->status = IB_WC_SUCCESS;
722 wc->opcode = IB_WC_RECV;
723 wc->pkey_index = pkey_index;
724 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
725 wc->src_qp = IB_QP0;
726 wc->qp = qp;
727 wc->slid = slid;
728 wc->sl = 0;
729 wc->dlid_path_bits = 0;
730 wc->port_num = port_num;
731 }
732
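/* Allocation size of an ib_mad_private, including the variable-size MAD buffer */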
733 static size_t mad_priv_size(const struct ib_mad_private *mp)
734 {
735 return sizeof(struct ib_mad_private) + mp->mad_size;
736 }
737
738 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
739 {
740 size_t size = sizeof(struct ib_mad_private) + mad_size;
741 struct ib_mad_private *ret = kzalloc(size, flags);
742
743 if (ret)
744 ret->mad_size = mad_size;
745
746 return ret;
747 }
748
749 static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
750 {
751 return rdma_max_mad_size(port_priv->device, port_priv->port_num);
752 }
753
754 static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
755 {
756 return sizeof(struct ib_grh) + mp->mad_size;
757 }
758
759 /*
760 * Return 0 if SMP is to be sent
761 * Return 1 if SMP was consumed locally (whether or not solicited)
762 * Return < 0 if error
763 */
764 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
765 struct ib_mad_send_wr_private *mad_send_wr)
766 {
767 int ret = 0;
768 struct ib_smp *smp = mad_send_wr->send_buf.mad;
769 struct opa_smp *opa_smp = (struct opa_smp *)smp;
770 unsigned long flags;
771 struct ib_mad_local_private *local;
772 struct ib_mad_private *mad_priv;
773 struct ib_mad_port_private *port_priv;
774 struct ib_mad_agent_private *recv_mad_agent = NULL;
775 struct ib_device *device = mad_agent_priv->agent.device;
776 u8 port_num;
777 struct ib_wc mad_wc;
778 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
779 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
780 u16 out_mad_pkey_index = 0;
781 u16 drslid;
782 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
783 mad_agent_priv->qp_info->port_priv->port_num);
784
785 if (rdma_cap_ib_switch(device) &&
786 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
787 port_num = send_wr->port_num;
788 else
789 port_num = mad_agent_priv->agent.port_num;
790
791 /*
792 * Directed route handling starts if the initial LID routed part of
793 * a request or the ending LID routed part of a response is empty.
794 * If we are at the start of the LID routed part, don't update the
795 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
796 */
797 if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
798 u32 opa_drslid;
799
800 if ((opa_get_smp_direction(opa_smp)
801 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
802 OPA_LID_PERMISSIVE &&
803 opa_smi_handle_dr_smp_send(opa_smp,
804 rdma_cap_ib_switch(device),
805 port_num) == IB_SMI_DISCARD) {
806 ret = -EINVAL;
807 dev_err(&device->dev, "OPA Invalid directed route\n");
808 goto out;
809 }
810 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
811 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
812 opa_drslid & 0xffff0000) {
813 ret = -EINVAL;
814 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
815 opa_drslid);
816 goto out;
817 }
818 drslid = (u16)(opa_drslid & 0x0000ffff);
819
820 /* Check to post send on QP or process locally */
821 if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
822 opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
823 goto out;
824 } else {
825 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
826 IB_LID_PERMISSIVE &&
827 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
828 IB_SMI_DISCARD) {
829 ret = -EINVAL;
830 dev_err(&device->dev, "Invalid directed route\n");
831 goto out;
832 }
833 drslid = be16_to_cpu(smp->dr_slid);
834
835 /* Check to post send on QP or process locally */
836 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
837 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
838 goto out;
839 }
840
841 local = kmalloc(sizeof *local, GFP_ATOMIC);
842 if (!local) {
843 ret = -ENOMEM;
844 goto out;
845 }
846 local->mad_priv = NULL;
847 local->recv_mad_agent = NULL;
848 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
849 if (!mad_priv) {
850 ret = -ENOMEM;
851 kfree(local);
852 goto out;
853 }
854
855 build_smp_wc(mad_agent_priv->agent.qp,
856 send_wr->wr.wr_cqe, drslid,
857 send_wr->pkey_index,
858 send_wr->port_num, &mad_wc);
859
860 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
861 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
862 + mad_send_wr->send_buf.data_len
863 + sizeof(struct ib_grh);
864 }
865
866 /* No GRH for DR SMP */
867 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
868 (const struct ib_mad_hdr *)smp, mad_size,
869 (struct ib_mad_hdr *)mad_priv->mad,
870 &mad_size, &out_mad_pkey_index);
871 switch (ret)
872 {
873 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
874 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
875 mad_agent_priv->agent.recv_handler) {
876 local->mad_priv = mad_priv;
877 local->recv_mad_agent = mad_agent_priv;
878 /*
879 * Reference MAD agent until receive
880 * side of local completion handled
881 */
882 atomic_inc(&mad_agent_priv->refcount);
883 } else
884 kfree(mad_priv);
885 break;
886 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
887 kfree(mad_priv);
888 break;
889 case IB_MAD_RESULT_SUCCESS:
890 /* Treat like an incoming receive MAD */
891 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
892 mad_agent_priv->agent.port_num);
893 if (port_priv) {
894 memcpy(mad_priv->mad, smp, mad_priv->mad_size);
895 recv_mad_agent = find_mad_agent(port_priv,
896 (const struct ib_mad_hdr *)mad_priv->mad);
897 }
898 if (!port_priv || !recv_mad_agent) {
899 /*
900 * No receiving agent so drop packet and
901 * generate send completion.
902 */
903 kfree(mad_priv);
904 break;
905 }
906 local->mad_priv = mad_priv;
907 local->recv_mad_agent = recv_mad_agent;
908 break;
909 default:
910 kfree(mad_priv);
911 kfree(local);
912 ret = -EINVAL;
913 goto out;
914 }
915
916 local->mad_send_wr = mad_send_wr;
917 if (opa) {
918 local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
919 local->return_wc_byte_len = mad_size;
920 }
921 /* Reference MAD agent until send side of local completion handled */
922 atomic_inc(&mad_agent_priv->refcount);
923 /* Queue local completion to local list */
924 spin_lock_irqsave(&mad_agent_priv->lock, flags);
925 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
926 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
927 queue_work(mad_agent_priv->qp_info->port_priv->wq,
928 &mad_agent_priv->local_work);
929 ret = 1;
930 out:
931 return ret;
932 }
933
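/* Padding needed so the data fills a whole number of segments of size mad_size - hdr_len */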
934 static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
935 {
936 int seg_size, pad;
937
938 seg_size = mad_size - hdr_len;
939 if (data_len && seg_size) {
940 pad = seg_size - data_len % seg_size;
941 return pad == seg_size ? 0 : pad;
942 } else
943 return seg_size;
944 }
945
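/* Free all RMPP segments queued on a send work request */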
946 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
947 {
948 struct ib_rmpp_segment *s, *t;
949
950 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
951 list_del(&s->list);
952 kfree(s);
953 }
954 }
955
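/*
 * Allocate the RMPP data segments for a send buffer and initialize the
 * RMPP header fields.
 */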
956 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
957 size_t mad_size, gfp_t gfp_mask)
958 {
959 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
960 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
961 struct ib_rmpp_segment *seg = NULL;
962 int left, seg_size, pad;
963
964 send_buf->seg_size = mad_size - send_buf->hdr_len;
965 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
966 seg_size = send_buf->seg_size;
967 pad = send_wr->pad;
968
969 /* Allocate data segments. */
970 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
971 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
972 if (!seg) {
973 free_send_rmpp_list(send_wr);
974 return -ENOMEM;
975 }
976 seg->num = ++send_buf->seg_count;
977 list_add_tail(&seg->list, &send_wr->rmpp_list);
978 }
979
980 /* Zero any padding */
981 if (pad)
982 memset(seg->data + seg_size - pad, 0, pad);
983
984 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
985 agent.rmpp_version;
986 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
987 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
988
989 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
990 struct ib_rmpp_segment, list);
991 send_wr->last_ack_seg = send_wr->cur_seg;
992 return 0;
993 }
994
995 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
996 {
997 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
998 }
999 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
1000
1001 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
1002 u32 remote_qpn, u16 pkey_index,
1003 int rmpp_active,
1004 int hdr_len, int data_len,
1005 gfp_t gfp_mask,
1006 u8 base_version)
1007 {
1008 struct ib_mad_agent_private *mad_agent_priv;
1009 struct ib_mad_send_wr_private *mad_send_wr;
1010 int pad, message_size, ret, size;
1011 void *buf;
1012 size_t mad_size;
1013 bool opa;
1014
1015 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
1016 agent);
1017
1018 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
1019
1020 if (opa && base_version == OPA_MGMT_BASE_VERSION)
1021 mad_size = sizeof(struct opa_mad);
1022 else
1023 mad_size = sizeof(struct ib_mad);
1024
1025 pad = get_pad_size(hdr_len, data_len, mad_size);
1026 message_size = hdr_len + data_len + pad;
1027
1028 if (ib_mad_kernel_rmpp_agent(mad_agent)) {
1029 if (!rmpp_active && message_size > mad_size)
1030 return ERR_PTR(-EINVAL);
1031 } else
1032 if (rmpp_active || message_size > mad_size)
1033 return ERR_PTR(-EINVAL);
1034
1035 size = rmpp_active ? hdr_len : mad_size;
1036 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
1037 if (!buf)
1038 return ERR_PTR(-ENOMEM);
1039
1040 mad_send_wr = buf + size;
1041 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
1042 mad_send_wr->send_buf.mad = buf;
1043 mad_send_wr->send_buf.hdr_len = hdr_len;
1044 mad_send_wr->send_buf.data_len = data_len;
1045 mad_send_wr->pad = pad;
1046
1047 mad_send_wr->mad_agent_priv = mad_agent_priv;
1048 mad_send_wr->sg_list[0].length = hdr_len;
1049 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
1050
1051 /* OPA MADs don't have to be the full 2048 bytes */
1052 if (opa && base_version == OPA_MGMT_BASE_VERSION &&
1053 data_len < mad_size - hdr_len)
1054 mad_send_wr->sg_list[1].length = data_len;
1055 else
1056 mad_send_wr->sg_list[1].length = mad_size - hdr_len;
1057
1058 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
1059
1060 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1061
1062 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1063 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
1064 mad_send_wr->send_wr.wr.num_sge = 2;
1065 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
1066 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
1067 mad_send_wr->send_wr.remote_qpn = remote_qpn;
1068 mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
1069 mad_send_wr->send_wr.pkey_index = pkey_index;
1070
1071 if (rmpp_active) {
1072 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
1073 if (ret) {
1074 kfree(buf);
1075 return ERR_PTR(ret);
1076 }
1077 }
1078
1079 mad_send_wr->send_buf.mad_agent = mad_agent;
1080 atomic_inc(&mad_agent_priv->refcount);
1081 return &mad_send_wr->send_buf;
1082 }
1083 EXPORT_SYMBOL(ib_create_send_mad);
1084
1085 int ib_get_mad_data_offset(u8 mgmt_class)
1086 {
1087 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1088 return IB_MGMT_SA_HDR;
1089 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1090 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1091 (mgmt_class == IB_MGMT_CLASS_BIS))
1092 return IB_MGMT_DEVICE_HDR;
1093 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1094 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1095 return IB_MGMT_VENDOR_HDR;
1096 else
1097 return IB_MGMT_MAD_HDR;
1098 }
1099 EXPORT_SYMBOL(ib_get_mad_data_offset);
1100
1101 int ib_is_mad_class_rmpp(u8 mgmt_class)
1102 {
1103 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1104 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1105 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1106 (mgmt_class == IB_MGMT_CLASS_BIS) ||
1107 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1108 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1109 return 1;
1110 return 0;
1111 }
1112 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1113
1114 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1115 {
1116 struct ib_mad_send_wr_private *mad_send_wr;
1117 struct list_head *list;
1118
1119 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1120 send_buf);
1121 list = &mad_send_wr->cur_seg->list;
1122
1123 if (mad_send_wr->cur_seg->num < seg_num) {
1124 list_for_each_entry(mad_send_wr->cur_seg, list, list)
1125 if (mad_send_wr->cur_seg->num == seg_num)
1126 break;
1127 } else if (mad_send_wr->cur_seg->num > seg_num) {
1128 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1129 if (mad_send_wr->cur_seg->num == seg_num)
1130 break;
1131 }
1132 return mad_send_wr->cur_seg->data;
1133 }
1134 EXPORT_SYMBOL(ib_get_rmpp_segment);
1135
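/* Payload to DMA-map: the current RMPP segment, or the data following the header */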
1136 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1137 {
1138 if (mad_send_wr->send_buf.seg_count)
1139 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1140 mad_send_wr->seg_num);
1141 else
1142 return mad_send_wr->send_buf.mad +
1143 mad_send_wr->send_buf.hdr_len;
1144 }
1145
1146 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1147 {
1148 struct ib_mad_agent_private *mad_agent_priv;
1149 struct ib_mad_send_wr_private *mad_send_wr;
1150
1151 mad_agent_priv = container_of(send_buf->mad_agent,
1152 struct ib_mad_agent_private, agent);
1153 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1154 send_buf);
1155
1156 free_send_rmpp_list(mad_send_wr);
1157 kfree(send_buf->mad);
1158 deref_mad_agent(mad_agent_priv);
1159 }
1160 EXPORT_SYMBOL(ib_free_send_mad);
1161
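/*
 * DMA-map the MAD header and payload and post the send work request, or
 * queue it on the overflow list if the send queue is already full.
 */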
1162 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1163 {
1164 struct ib_mad_qp_info *qp_info;
1165 struct list_head *list;
1166 struct ib_send_wr *bad_send_wr;
1167 struct ib_mad_agent *mad_agent;
1168 struct ib_sge *sge;
1169 unsigned long flags;
1170 int ret;
1171
1172 /* Set WR ID to find mad_send_wr upon completion */
1173 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1174 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1175 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1176 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1177
1178 mad_agent = mad_send_wr->send_buf.mad_agent;
1179 sge = mad_send_wr->sg_list;
1180 sge[0].addr = ib_dma_map_single(mad_agent->device,
1181 mad_send_wr->send_buf.mad,
1182 sge[0].length,
1183 DMA_TO_DEVICE);
1184 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1185 return -ENOMEM;
1186
1187 mad_send_wr->header_mapping = sge[0].addr;
1188
1189 sge[1].addr = ib_dma_map_single(mad_agent->device,
1190 ib_get_payload(mad_send_wr),
1191 sge[1].length,
1192 DMA_TO_DEVICE);
1193 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1194 ib_dma_unmap_single(mad_agent->device,
1195 mad_send_wr->header_mapping,
1196 sge[0].length, DMA_TO_DEVICE);
1197 return -ENOMEM;
1198 }
1199 mad_send_wr->payload_mapping = sge[1].addr;
1200
1201 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1202 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1203 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
1204 &bad_send_wr);
1205 list = &qp_info->send_queue.list;
1206 } else {
1207 ret = 0;
1208 list = &qp_info->overflow_list;
1209 }
1210
1211 if (!ret) {
1212 qp_info->send_queue.count++;
1213 list_add_tail(&mad_send_wr->mad_list.list, list);
1214 }
1215 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1216 if (ret) {
1217 ib_dma_unmap_single(mad_agent->device,
1218 mad_send_wr->header_mapping,
1219 sge[0].length, DMA_TO_DEVICE);
1220 ib_dma_unmap_single(mad_agent->device,
1221 mad_send_wr->payload_mapping,
1222 sge[1].length, DMA_TO_DEVICE);
1223 }
1224 return ret;
1225 }
1226
1227 /*
1228 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1229 * with the registered client
1230 */
1231 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1232 struct ib_mad_send_buf **bad_send_buf)
1233 {
1234 struct ib_mad_agent_private *mad_agent_priv;
1235 struct ib_mad_send_buf *next_send_buf;
1236 struct ib_mad_send_wr_private *mad_send_wr;
1237 unsigned long flags;
1238 int ret = -EINVAL;
1239
1240 /* Walk list of send WRs and post each on send list */
1241 for (; send_buf; send_buf = next_send_buf) {
1242 mad_send_wr = container_of(send_buf,
1243 struct ib_mad_send_wr_private,
1244 send_buf);
1245 mad_agent_priv = mad_send_wr->mad_agent_priv;
1246
1247 ret = ib_mad_enforce_security(mad_agent_priv,
1248 mad_send_wr->send_wr.pkey_index);
1249 if (ret)
1250 goto error;
1251
1252 if (!send_buf->mad_agent->send_handler ||
1253 (send_buf->timeout_ms &&
1254 !send_buf->mad_agent->recv_handler)) {
1255 ret = -EINVAL;
1256 goto error;
1257 }
1258
1259 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1260 if (mad_agent_priv->agent.rmpp_version) {
1261 ret = -EINVAL;
1262 goto error;
1263 }
1264 }
1265
1266 /*
1267 * Save pointer to next work request to post in case the
1268 * current one completes, and the user modifies the work
1269 * request associated with the completion
1270 */
1271 next_send_buf = send_buf->next;
1272 mad_send_wr->send_wr.ah = send_buf->ah;
1273
1274 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1275 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1276 ret = handle_outgoing_dr_smp(mad_agent_priv,
1277 mad_send_wr);
1278 if (ret < 0) /* error */
1279 goto error;
1280 else if (ret == 1) /* locally consumed */
1281 continue;
1282 }
1283
1284 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1285 /* Timeout will be updated after send completes */
1286 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1287 mad_send_wr->max_retries = send_buf->retries;
1288 mad_send_wr->retries_left = send_buf->retries;
1289 send_buf->retries = 0;
1290 /* Reference for work request to QP + response */
1291 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1292 mad_send_wr->status = IB_WC_SUCCESS;
1293
1294 /* Reference MAD agent until send completes */
1295 atomic_inc(&mad_agent_priv->refcount);
1296 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1297 list_add_tail(&mad_send_wr->agent_list,
1298 &mad_agent_priv->send_list);
1299 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1300
1301 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1302 ret = ib_send_rmpp_mad(mad_send_wr);
1303 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1304 ret = ib_send_mad(mad_send_wr);
1305 } else
1306 ret = ib_send_mad(mad_send_wr);
1307 if (ret < 0) {
1308 /* Fail send request */
1309 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1310 list_del(&mad_send_wr->agent_list);
1311 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1312 atomic_dec(&mad_agent_priv->refcount);
1313 goto error;
1314 }
1315 }
1316 return 0;
1317 error:
1318 if (bad_send_buf)
1319 *bad_send_buf = send_buf;
1320 return ret;
1321 }
1322 EXPORT_SYMBOL(ib_post_send_mad);
1323
1324 /*
1325 * ib_free_recv_mad - Returns data buffers used to receive
1326 * a MAD to the access layer
1327 */
1328 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1329 {
1330 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1331 struct ib_mad_private_header *mad_priv_hdr;
1332 struct ib_mad_private *priv;
1333 struct list_head free_list;
1334
1335 INIT_LIST_HEAD(&free_list);
1336 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1337
1338 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1339 &free_list, list) {
1340 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1341 recv_buf);
1342 mad_priv_hdr = container_of(mad_recv_wc,
1343 struct ib_mad_private_header,
1344 recv_wc);
1345 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1346 header);
1347 kfree(priv);
1348 }
1349 }
1350 EXPORT_SYMBOL(ib_free_recv_mad);
1351
1352 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1353 u8 rmpp_version,
1354 ib_mad_send_handler send_handler,
1355 ib_mad_recv_handler recv_handler,
1356 void *context)
1357 {
1358 return ERR_PTR(-EINVAL); /* XXX: for now */
1359 }
1360 EXPORT_SYMBOL(ib_redirect_mad_qp);
1361
1362 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1363 struct ib_wc *wc)
1364 {
1365 dev_err(&mad_agent->device->dev,
1366 "ib_process_mad_wc() not implemented yet\n");
1367 return 0;
1368 }
1369 EXPORT_SYMBOL(ib_process_mad_wc);
1370
1371 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1372 struct ib_mad_reg_req *mad_reg_req)
1373 {
1374 int i;
1375
1376 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1377 if ((*method)->agent[i]) {
1378 pr_err("Method %d already in use\n", i);
1379 return -EINVAL;
1380 }
1381 }
1382 return 0;
1383 }
1384
1385 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1386 {
1387 /* Allocate management method table */
1388 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1389 return (*method) ? 0 : (-ENOMEM);
1390 }
1391
1392 /*
1393 * Check to see if there are any methods still in use
1394 */
1395 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1396 {
1397 int i;
1398
1399 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1400 if (method->agent[i])
1401 return 1;
1402 return 0;
1403 }
1404
1405 /*
1406 * Check to see if there are any method tables for this class still in use
1407 */
1408 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1409 {
1410 int i;
1411
1412 for (i = 0; i < MAX_MGMT_CLASS; i++)
1413 if (class->method_table[i])
1414 return 1;
1415 return 0;
1416 }
1417
1418 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1419 {
1420 int i;
1421
1422 for (i = 0; i < MAX_MGMT_OUI; i++)
1423 if (vendor_class->method_table[i])
1424 return 1;
1425 return 0;
1426 }
1427
1428 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1429 const char *oui)
1430 {
1431 int i;
1432
1433 for (i = 0; i < MAX_MGMT_OUI; i++)
1434                 /* Is there a matching OUI for this vendor class? */
1435 if (!memcmp(vendor_class->oui[i], oui, 3))
1436 return i;
1437
1438 return -1;
1439 }
1440
1441 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1442 {
1443 int i;
1444
1445 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1446 if (vendor->vendor_class[i])
1447 return 1;
1448
1449 return 0;
1450 }
1451
1452 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1453 struct ib_mad_agent_private *agent)
1454 {
1455 int i;
1456
1457 /* Remove any methods for this mad agent */
1458 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1459 if (method->agent[i] == agent) {
1460 method->agent[i] = NULL;
1461 }
1462 }
1463 }
1464
1465 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1466 struct ib_mad_agent_private *agent_priv,
1467 u8 mgmt_class)
1468 {
1469 struct ib_mad_port_private *port_priv;
1470 struct ib_mad_mgmt_class_table **class;
1471 struct ib_mad_mgmt_method_table **method;
1472 int i, ret;
1473
1474 port_priv = agent_priv->qp_info->port_priv;
1475 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1476 if (!*class) {
1477 /* Allocate management class table for "new" class version */
1478 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1479 if (!*class) {
1480 ret = -ENOMEM;
1481 goto error1;
1482 }
1483
1484 /* Allocate method table for this management class */
1485 method = &(*class)->method_table[mgmt_class];
1486 if ((ret = allocate_method_table(method)))
1487 goto error2;
1488 } else {
1489 method = &(*class)->method_table[mgmt_class];
1490 if (!*method) {
1491 /* Allocate method table for this management class */
1492 if ((ret = allocate_method_table(method)))
1493 goto error1;
1494 }
1495 }
1496
1497 /* Now, make sure methods are not already in use */
1498 if (method_in_use(method, mad_reg_req))
1499 goto error3;
1500
1501 /* Finally, add in methods being registered */
1502 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1503 (*method)->agent[i] = agent_priv;
1504
1505 return 0;
1506
1507 error3:
1508 /* Remove any methods for this mad agent */
1509 remove_methods_mad_agent(*method, agent_priv);
1510 /* Now, check to see if there are any methods in use */
1511 if (!check_method_table(*method)) {
1512 /* If not, release management method table */
1513 kfree(*method);
1514 *method = NULL;
1515 }
1516 ret = -EINVAL;
1517 goto error1;
1518 error2:
1519 kfree(*class);
1520 *class = NULL;
1521 error1:
1522 return ret;
1523 }
1524
1525 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1526 struct ib_mad_agent_private *agent_priv)
1527 {
1528 struct ib_mad_port_private *port_priv;
1529 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1530 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1531 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1532 struct ib_mad_mgmt_method_table **method;
1533 int i, ret = -ENOMEM;
1534 u8 vclass;
1535
1536 /* "New" vendor (with OUI) class */
1537 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1538 port_priv = agent_priv->qp_info->port_priv;
1539 vendor_table = &port_priv->version[
1540 mad_reg_req->mgmt_class_version].vendor;
1541 if (!*vendor_table) {
1542 /* Allocate mgmt vendor class table for "new" class version */
1543 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1544 if (!vendor)
1545 goto error1;
1546
1547 *vendor_table = vendor;
1548 }
1549 if (!(*vendor_table)->vendor_class[vclass]) {
1550 /* Allocate table for this management vendor class */
1551 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1552 if (!vendor_class)
1553 goto error2;
1554
1555 (*vendor_table)->vendor_class[vclass] = vendor_class;
1556 }
1557 for (i = 0; i < MAX_MGMT_OUI; i++) {
1558                 /* Is there a matching OUI for this vendor class? */
1559 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1560 mad_reg_req->oui, 3)) {
1561 method = &(*vendor_table)->vendor_class[
1562 vclass]->method_table[i];
1563 if (!*method)
1564 goto error3;
1565 goto check_in_use;
1566 }
1567 }
1568 for (i = 0; i < MAX_MGMT_OUI; i++) {
1569 /* OUI slot available ? */
1570 if (!is_vendor_oui((*vendor_table)->vendor_class[
1571 vclass]->oui[i])) {
1572 method = &(*vendor_table)->vendor_class[
1573 vclass]->method_table[i];
1574 /* Allocate method table for this OUI */
1575 if (!*method) {
1576 ret = allocate_method_table(method);
1577 if (ret)
1578 goto error3;
1579 }
1580 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1581 mad_reg_req->oui, 3);
1582 goto check_in_use;
1583 }
1584 }
1585 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1586 goto error3;
1587
1588 check_in_use:
1589 /* Now, make sure methods are not already in use */
1590 if (method_in_use(method, mad_reg_req))
1591 goto error4;
1592
1593 /* Finally, add in methods being registered */
1594 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1595 (*method)->agent[i] = agent_priv;
1596
1597 return 0;
1598
1599 error4:
1600 /* Remove any methods for this mad agent */
1601 remove_methods_mad_agent(*method, agent_priv);
1602 /* Now, check to see if there are any methods in use */
1603 if (!check_method_table(*method)) {
1604 /* If not, release management method table */
1605 kfree(*method);
1606 *method = NULL;
1607 }
1608 ret = -EINVAL;
1609 error3:
1610 if (vendor_class) {
1611 (*vendor_table)->vendor_class[vclass] = NULL;
1612 kfree(vendor_class);
1613 }
1614 error2:
1615 if (vendor) {
1616 *vendor_table = NULL;
1617 kfree(vendor);
1618 }
1619 error1:
1620 return ret;
1621 }
1622
1623 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1624 {
1625 struct ib_mad_port_private *port_priv;
1626 struct ib_mad_mgmt_class_table *class;
1627 struct ib_mad_mgmt_method_table *method;
1628 struct ib_mad_mgmt_vendor_class_table *vendor;
1629 struct ib_mad_mgmt_vendor_class *vendor_class;
1630 int index;
1631 u8 mgmt_class;
1632
1633 /*
1634          * Was a MAD registration request supplied
1635          * with the original registration?
1636 */
1637 if (!agent_priv->reg_req) {
1638 goto out;
1639 }
1640
1641 port_priv = agent_priv->qp_info->port_priv;
1642 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1643 class = port_priv->version[
1644 agent_priv->reg_req->mgmt_class_version].class;
1645 if (!class)
1646 goto vendor_check;
1647
1648 method = class->method_table[mgmt_class];
1649 if (method) {
1650 /* Remove any methods for this mad agent */
1651 remove_methods_mad_agent(method, agent_priv);
1652 /* Now, check to see if there are any methods still in use */
1653 if (!check_method_table(method)) {
1654 /* If not, release management method table */
1655 kfree(method);
1656 class->method_table[mgmt_class] = NULL;
1657 /* Any management classes left ? */
1658 if (!check_class_table(class)) {
1659 /* If not, release management class table */
1660 kfree(class);
1661 port_priv->version[
1662 agent_priv->reg_req->
1663 mgmt_class_version].class = NULL;
1664 }
1665 }
1666 }
1667
1668 vendor_check:
1669 if (!is_vendor_class(mgmt_class))
1670 goto out;
1671
1672 /* normalize mgmt_class to vendor range 2 */
1673 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1674 vendor = port_priv->version[
1675 agent_priv->reg_req->mgmt_class_version].vendor;
1676
1677 if (!vendor)
1678 goto out;
1679
1680 vendor_class = vendor->vendor_class[mgmt_class];
1681 if (vendor_class) {
1682 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1683 if (index < 0)
1684 goto out;
1685 method = vendor_class->method_table[index];
1686 if (method) {
1687 /* Remove any methods for this mad agent */
1688 remove_methods_mad_agent(method, agent_priv);
1689 /*
1690 * Now, check to see if there are
1691 * any methods still in use
1692 */
1693 if (!check_method_table(method)) {
1694 /* If not, release management method table */
1695 kfree(method);
1696 vendor_class->method_table[index] = NULL;
1697 memset(vendor_class->oui[index], 0, 3);
1698 /* Any OUIs left ? */
1699 if (!check_vendor_class(vendor_class)) {
1700 /* If not, release vendor class table */
1701 kfree(vendor_class);
1702 vendor->vendor_class[mgmt_class] = NULL;
1703 /* Any other vendor classes left ? */
1704 if (!check_vendor_table(vendor)) {
1705 kfree(vendor);
1706 port_priv->version[
1707 agent_priv->reg_req->
1708 mgmt_class_version].
1709 vendor = NULL;
1710 }
1711 }
1712 }
1713 }
1714 }
1715
1716 out:
1717 return;
1718 }
1719
1720 static struct ib_mad_agent_private *
1721 find_mad_agent(struct ib_mad_port_private *port_priv,
1722 const struct ib_mad_hdr *mad_hdr)
1723 {
1724 struct ib_mad_agent_private *mad_agent = NULL;
1725 unsigned long flags;
1726
1727 spin_lock_irqsave(&port_priv->reg_lock, flags);
1728 if (ib_response_mad(mad_hdr)) {
1729 u32 hi_tid;
1730 struct ib_mad_agent_private *entry;
1731
1732 /*
1733 * Routing is based on high 32 bits of transaction ID
1734 * of MAD.
1735 */
1736 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1737 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1738 if (entry->agent.hi_tid == hi_tid) {
1739 mad_agent = entry;
1740 break;
1741 }
1742 }
1743 } else {
1744 struct ib_mad_mgmt_class_table *class;
1745 struct ib_mad_mgmt_method_table *method;
1746 struct ib_mad_mgmt_vendor_class_table *vendor;
1747 struct ib_mad_mgmt_vendor_class *vendor_class;
1748 const struct ib_vendor_mad *vendor_mad;
1749 int index;
1750
1751 /*
1752 * Routing is based on version, class, and method
1753 * For "newer" vendor MADs, also based on OUI
1754 */
1755 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1756 goto out;
1757 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1758 class = port_priv->version[
1759 mad_hdr->class_version].class;
1760 if (!class)
1761 goto out;
1762 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1763 ARRAY_SIZE(class->method_table))
1764 goto out;
1765 method = class->method_table[convert_mgmt_class(
1766 mad_hdr->mgmt_class)];
1767 if (method)
1768 mad_agent = method->agent[mad_hdr->method &
1769 ~IB_MGMT_METHOD_RESP];
1770 } else {
1771 vendor = port_priv->version[
1772 mad_hdr->class_version].vendor;
1773 if (!vendor)
1774 goto out;
1775 vendor_class = vendor->vendor_class[vendor_class_index(
1776 mad_hdr->mgmt_class)];
1777 if (!vendor_class)
1778 goto out;
1779 /* Find matching OUI */
1780 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1781 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1782 if (index == -1)
1783 goto out;
1784 method = vendor_class->method_table[index];
1785 if (method) {
1786 mad_agent = method->agent[mad_hdr->method &
1787 ~IB_MGMT_METHOD_RESP];
1788 }
1789 }
1790 }
1791
1792 if (mad_agent) {
1793 if (mad_agent->agent.recv_handler)
1794 atomic_inc(&mad_agent->refcount);
1795 else {
1796 dev_notice(&port_priv->device->dev,
1797 "No receive handler for client %p on port %d\n",
1798 &mad_agent->agent, port_priv->port_num);
1799 mad_agent = NULL;
1800 }
1801 }
1802 out:
1803 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1804
1805 return mad_agent;
1806 }
1807
1808 static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1809 const struct ib_mad_qp_info *qp_info,
1810 bool opa)
1811 {
1812 int valid = 0;
1813 u32 qp_num = qp_info->qp->qp_num;
1814
1815 /* Make sure MAD base version is understood */
1816 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1817 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1818 pr_err("MAD received with unsupported base version %d %s\n",
1819 mad_hdr->base_version, opa ? "(opa)" : "");
1820 goto out;
1821 }
1822
1823 /* Filter SMI packets sent to other than QP0 */
1824 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1825 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1826 if (qp_num == 0)
1827 valid = 1;
1828 } else {
1829 /* CM attributes other than ClassPortInfo only use Send method */
1830 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1831 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1832 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1833 goto out;
1834 /* Filter GSI packets sent to QP0 */
1835 if (qp_num != 0)
1836 valid = 1;
1837 }
1838
1839 out:
1840 return valid;
1841 }
1842
1843 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1844 const struct ib_mad_hdr *mad_hdr)
1845 {
1846 struct ib_rmpp_mad *rmpp_mad;
1847
1848 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1849 return !mad_agent_priv->agent.rmpp_version ||
1850 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1851 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1852 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1853 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1854 }
1855
1856 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1857 const struct ib_mad_recv_wc *rwc)
1858 {
1859 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1860 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1861 }
1862
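/*
 * Decide whether a received MAD and a previously sent MAD refer to the same
 * remote address.  Depending on whether a GRH is present, this compares
 * either GIDs or LIDs (masking LMC path bits for the request/response
 * case).  Returns 0 (no match) whenever the comparison cannot be made
 * safely.
 */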
1863 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1864 const struct ib_mad_send_wr_private *wr,
1865 const struct ib_mad_recv_wc *rwc)
1866 {
1867 struct rdma_ah_attr attr;
1868 u8 send_resp, rcv_resp;
1869 union ib_gid sgid;
1870 struct ib_device *device = mad_agent_priv->agent.device;
1871 u8 port_num = mad_agent_priv->agent.port_num;
1872 u8 lmc;
1873 bool has_grh;
1874
1875 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1876 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1877
1878 if (send_resp == rcv_resp)
1879 /* both requests or both responses; treat GIDs as different */
1880 return 0;
1881
1882 if (rdma_query_ah(wr->send_buf.ah, &attr))
1883 /* Assume not equal, to avoid false positives. */
1884 return 0;
1885
1886 has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
1887 if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
1888 /* one has a GRH, the other does not; assume GIDs differ */
1889 return 0;
1890
1891 if (!send_resp && rcv_resp) {
1892 /* wr is a request and rwc is its response */
1893 if (!has_grh) {
1894 if (ib_get_cached_lmc(device, port_num, &lmc))
1895 return 0;
1896 return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
1897 rwc->wc->dlid_path_bits) &
1898 ((1 << lmc) - 1)));
1899 } else {
1900 const struct ib_global_route *grh =
1901 rdma_ah_read_grh(&attr);
1902
1903 if (ib_get_cached_gid(device, port_num,
1904 grh->sgid_index, &sgid, NULL))
1905 return 0;
1906 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1907 16);
1908 }
1909 }
1910
1911 if (!has_grh)
1912 return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
1913 else
1914 return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
1915 rwc->recv_buf.grh->sgid.raw,
1916 16);
1917 }
1918
1919 static inline int is_direct(u8 class)
1920 {
1921 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1922 }
1923
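/*
 * Find the outstanding send that a received response belongs to.  The wait
 * list is searched first, then the send list (the response can arrive
 * before the send completion is processed).  Matching is by TID, management
 * class and, except for directed-route MADs, the GID/LID of the peer.
 * A canceled request (status != IB_WC_SUCCESS) yields NULL.
 */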
1924 struct ib_mad_send_wr_private*
1925 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1926 const struct ib_mad_recv_wc *wc)
1927 {
1928 struct ib_mad_send_wr_private *wr;
1929 const struct ib_mad_hdr *mad_hdr;
1930
1931 mad_hdr = &wc->recv_buf.mad->mad_hdr;
1932
1933 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1934 if ((wr->tid == mad_hdr->tid) &&
1935 rcv_has_same_class(wr, wc) &&
1936 /*
1937 * Don't check GID for direct routed MADs.
1938 * These might have permissive LIDs.
1939 */
1940 (is_direct(mad_hdr->mgmt_class) ||
1941 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1942 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1943 }
1944
1945 /*
1946 * It's possible to receive the response before we've
1947 * been notified that the send has completed
1948 */
1949 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1950 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1951 wr->tid == mad_hdr->tid &&
1952 wr->timeout &&
1953 rcv_has_same_class(wr, wc) &&
1954 /*
1955 * Don't check GID for direct routed MADs.
1956 * These might have permissive LIDs.
1957 */
1958 (is_direct(mad_hdr->mgmt_class) ||
1959 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1960 /* Verify request has not been canceled */
1961 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1962 }
1963 return NULL;
1964 }
1965
1966 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1967 {
1968 mad_send_wr->timeout = 0;
1969 if (mad_send_wr->refcount == 1)
1970 list_move_tail(&mad_send_wr->agent_list,
1971 &mad_send_wr->mad_agent_priv->done_list);
1972 }
1973
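/*
 * Deliver a received MAD to its agent: enforce the pkey-based security
 * check, let the kernel RMPP code reassemble multi-segment transfers, match
 * responses against the corresponding request, and finally invoke the
 * agent's recv_handler.  The agent reference taken by find_mad_agent() is
 * dropped on every path.
 */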
1974 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1975 struct ib_mad_recv_wc *mad_recv_wc)
1976 {
1977 struct ib_mad_send_wr_private *mad_send_wr;
1978 struct ib_mad_send_wc mad_send_wc;
1979 unsigned long flags;
1980 int ret;
1981
1982 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1983 ret = ib_mad_enforce_security(mad_agent_priv,
1984 mad_recv_wc->wc->pkey_index);
1985 if (ret) {
1986 ib_free_recv_mad(mad_recv_wc);
1987 deref_mad_agent(mad_agent_priv);
1988 return;
1989 }
1990
1991 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1992 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1993 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1994 mad_recv_wc);
1995 if (!mad_recv_wc) {
1996 deref_mad_agent(mad_agent_priv);
1997 return;
1998 }
1999 }
2000
2001 /* Complete corresponding request */
2002 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
2003 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2004 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
2005 if (!mad_send_wr) {
2006 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2007 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
2008 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
2009 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
2010 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
2011 /* user rmpp is in effect
2012 * and this is an active RMPP MAD
2013 */
2014 mad_agent_priv->agent.recv_handler(
2015 &mad_agent_priv->agent, NULL,
2016 mad_recv_wc);
2017 atomic_dec(&mad_agent_priv->refcount);
2018 } else {
2019 /* not user rmpp, revert to normal behavior and
2020 * drop the mad */
2021 ib_free_recv_mad(mad_recv_wc);
2022 deref_mad_agent(mad_agent_priv);
2023 return;
2024 }
2025 } else {
2026 ib_mark_mad_done(mad_send_wr);
2027 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2028
2029 /* Defined behavior is to complete response before request */
2030 mad_agent_priv->agent.recv_handler(
2031 &mad_agent_priv->agent,
2032 &mad_send_wr->send_buf,
2033 mad_recv_wc);
2034 atomic_dec(&mad_agent_priv->refcount);
2035
2036 mad_send_wc.status = IB_WC_SUCCESS;
2037 mad_send_wc.vendor_err = 0;
2038 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2039 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2040 }
2041 } else {
2042 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
2043 mad_recv_wc);
2044 deref_mad_agent(mad_agent_priv);
2045 }
2046
2047 return;
2048 }
2049
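/*
 * Process a directed-route SMP received on QP0: discard it, handle it
 * locally, or (on a switch) forward it out the egress port via
 * agent_send_response().  Returns IB_SMI_HANDLE if the MAD should continue
 * through normal receive processing.
 */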
2050 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2051 const struct ib_mad_qp_info *qp_info,
2052 const struct ib_wc *wc,
2053 int port_num,
2054 struct ib_mad_private *recv,
2055 struct ib_mad_private *response)
2056 {
2057 enum smi_forward_action retsmi;
2058 struct ib_smp *smp = (struct ib_smp *)recv->mad;
2059
2060 if (smi_handle_dr_smp_recv(smp,
2061 rdma_cap_ib_switch(port_priv->device),
2062 port_num,
2063 port_priv->device->phys_port_cnt) ==
2064 IB_SMI_DISCARD)
2065 return IB_SMI_DISCARD;
2066
2067 retsmi = smi_check_forward_dr_smp(smp);
2068 if (retsmi == IB_SMI_LOCAL)
2069 return IB_SMI_HANDLE;
2070
2071 if (retsmi == IB_SMI_SEND) { /* don't forward */
2072 if (smi_handle_dr_smp_send(smp,
2073 rdma_cap_ib_switch(port_priv->device),
2074 port_num) == IB_SMI_DISCARD)
2075 return IB_SMI_DISCARD;
2076
2077 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2078 return IB_SMI_DISCARD;
2079 } else if (rdma_cap_ib_switch(port_priv->device)) {
2080 /* forward case for switches */
2081 memcpy(response, recv, mad_priv_size(response));
2082 response->header.recv_wc.wc = &response->header.wc;
2083 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2084 response->header.recv_wc.recv_buf.grh = &response->grh;
2085
2086 agent_send_response((const struct ib_mad_hdr *)response->mad,
2087 &response->grh, wc,
2088 port_priv->device,
2089 smi_get_fwd_port(smp),
2090 qp_info->qp->qp_num,
2091 response->mad_size,
2092 false);
2093
2094 return IB_SMI_DISCARD;
2095 }
2096 return IB_SMI_HANDLE;
2097 }
2098
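/*
 * Build an "unsupported method/attribute" reply for a Get/Set request that
 * no agent claimed, so the sender is not left waiting for a timeout.
 * Returns false for any other method, in which case the MAD is simply
 * dropped.
 */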
2099 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2100 struct ib_mad_private *response,
2101 size_t *resp_len, bool opa)
2102 {
2103 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2104 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2105
2106 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2107 recv_hdr->method == IB_MGMT_METHOD_SET) {
2108 memcpy(response, recv, mad_priv_size(response));
2109 response->header.recv_wc.wc = &response->header.wc;
2110 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2111 response->header.recv_wc.recv_buf.grh = &response->grh;
2112 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2113 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2114 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2115 resp_hdr->status |= IB_SMP_DIRECTION;
2116
2117 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2118 if (recv_hdr->mgmt_class ==
2119 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2120 recv_hdr->mgmt_class ==
2121 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2122 *resp_len = opa_get_smp_header_size(
2123 (struct opa_smp *)recv->mad);
2124 else
2125 *resp_len = sizeof(struct ib_mad_hdr);
2126 }
2127
2128 return true;
2129 } else {
2130 return false;
2131 }
2132 }
2133
2134 static enum smi_action
2135 handle_opa_smi(struct ib_mad_port_private *port_priv,
2136 struct ib_mad_qp_info *qp_info,
2137 struct ib_wc *wc,
2138 int port_num,
2139 struct ib_mad_private *recv,
2140 struct ib_mad_private *response)
2141 {
2142 enum smi_forward_action retsmi;
2143 struct opa_smp *smp = (struct opa_smp *)recv->mad;
2144
2145 if (opa_smi_handle_dr_smp_recv(smp,
2146 rdma_cap_ib_switch(port_priv->device),
2147 port_num,
2148 port_priv->device->phys_port_cnt) ==
2149 IB_SMI_DISCARD)
2150 return IB_SMI_DISCARD;
2151
2152 retsmi = opa_smi_check_forward_dr_smp(smp);
2153 if (retsmi == IB_SMI_LOCAL)
2154 return IB_SMI_HANDLE;
2155
2156 if (retsmi == IB_SMI_SEND) { /* don't forward */
2157 if (opa_smi_handle_dr_smp_send(smp,
2158 rdma_cap_ib_switch(port_priv->device),
2159 port_num) == IB_SMI_DISCARD)
2160 return IB_SMI_DISCARD;
2161
2162 if (opa_smi_check_local_smp(smp, port_priv->device) ==
2163 IB_SMI_DISCARD)
2164 return IB_SMI_DISCARD;
2165
2166 } else if (rdma_cap_ib_switch(port_priv->device)) {
2167 /* forward case for switches */
2168 memcpy(response, recv, mad_priv_size(response));
2169 response->header.recv_wc.wc = &response->header.wc;
2170 response->header.recv_wc.recv_buf.opa_mad =
2171 (struct opa_mad *)response->mad;
2172 response->header.recv_wc.recv_buf.grh = &response->grh;
2173
2174 agent_send_response((const struct ib_mad_hdr *)response->mad,
2175 &response->grh, wc,
2176 port_priv->device,
2177 opa_smi_get_fwd_port(smp),
2178 qp_info->qp->qp_num,
2179 recv->header.wc.byte_len,
2180 true);
2181
2182 return IB_SMI_DISCARD;
2183 }
2184
2185 return IB_SMI_HANDLE;
2186 }
2187
2188 static enum smi_action
2189 handle_smi(struct ib_mad_port_private *port_priv,
2190 struct ib_mad_qp_info *qp_info,
2191 struct ib_wc *wc,
2192 int port_num,
2193 struct ib_mad_private *recv,
2194 struct ib_mad_private *response,
2195 bool opa)
2196 {
2197 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2198
2199 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2200 mad_hdr->class_version == OPA_SM_CLASS_VERSION)
2201 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2202 response);
2203
2204 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2205 }
2206
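/*
 * Receive completion handler.  Unmaps the buffer, validates the MAD, runs
 * SMP processing for directed-route MADs, offers the MAD to the driver's
 * process_mad() hook, and then hands it to the matching agent (or generates
 * an "unsupported" reply).  A fresh receive buffer is reposted before
 * returning.
 */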
2207 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2208 {
2209 struct ib_mad_port_private *port_priv = cq->cq_context;
2210 struct ib_mad_list_head *mad_list =
2211 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2212 struct ib_mad_qp_info *qp_info;
2213 struct ib_mad_private_header *mad_priv_hdr;
2214 struct ib_mad_private *recv, *response = NULL;
2215 struct ib_mad_agent_private *mad_agent;
2216 int port_num;
2217 int ret = IB_MAD_RESULT_SUCCESS;
2218 size_t mad_size;
2219 u16 resp_mad_pkey_index = 0;
2220 bool opa;
2221
2222 if (list_empty_careful(&port_priv->port_list))
2223 return;
2224
2225 if (wc->status != IB_WC_SUCCESS) {
2226 /*
2227 * Receive errors indicate that the QP has entered the error
2228 * state - the error handling/shutdown code will clean up
2229 */
2230 return;
2231 }
2232
2233 qp_info = mad_list->mad_queue->qp_info;
2234 dequeue_mad(mad_list);
2235
2236 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2237 qp_info->port_priv->port_num);
2238
2239 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2240 mad_list);
2241 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2242 ib_dma_unmap_single(port_priv->device,
2243 recv->header.mapping,
2244 mad_priv_dma_size(recv),
2245 DMA_FROM_DEVICE);
2246
2247 /* Set up the MAD receive work completion from the "normal" work completion */
2248 recv->header.wc = *wc;
2249 recv->header.recv_wc.wc = &recv->header.wc;
2250
2251 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2252 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2253 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2254 } else {
2255 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2256 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2257 }
2258
2259 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2260 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2261
2262 if (atomic_read(&qp_info->snoop_count))
2263 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2264
2265 /* Validate MAD */
2266 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2267 goto out;
2268
2269 mad_size = recv->mad_size;
2270 response = alloc_mad_private(mad_size, GFP_KERNEL);
2271 if (!response)
2272 goto out;
2273
2274 if (rdma_cap_ib_switch(port_priv->device))
2275 port_num = wc->port_num;
2276 else
2277 port_num = port_priv->port_num;
2278
2279 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2280 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2281 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2282 response, opa)
2283 == IB_SMI_DISCARD)
2284 goto out;
2285 }
2286
2287 /* Give driver "right of first refusal" on incoming MAD */
2288 if (port_priv->device->process_mad) {
2289 ret = port_priv->device->process_mad(port_priv->device, 0,
2290 port_priv->port_num,
2291 wc, &recv->grh,
2292 (const struct ib_mad_hdr *)recv->mad,
2293 recv->mad_size,
2294 (struct ib_mad_hdr *)response->mad,
2295 &mad_size, &resp_mad_pkey_index);
2296
2297 if (opa)
2298 wc->pkey_index = resp_mad_pkey_index;
2299
2300 if (ret & IB_MAD_RESULT_SUCCESS) {
2301 if (ret & IB_MAD_RESULT_CONSUMED)
2302 goto out;
2303 if (ret & IB_MAD_RESULT_REPLY) {
2304 agent_send_response((const struct ib_mad_hdr *)response->mad,
2305 &recv->grh, wc,
2306 port_priv->device,
2307 port_num,
2308 qp_info->qp->qp_num,
2309 mad_size, opa);
2310 goto out;
2311 }
2312 }
2313 }
2314
2315 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2316 if (mad_agent) {
2317 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2318 /*
2319 * recv is freed by ib_mad_complete_recv(), either in its
2320 * error paths or by the agent's recv_handler
2321 */
2322 recv = NULL;
2323 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2324 generate_unmatched_resp(recv, response, &mad_size, opa)) {
2325 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2326 port_priv->device, port_num,
2327 qp_info->qp->qp_num, mad_size, opa);
2328 }
2329
2330 out:
2331 /* Post another receive request for this QP */
2332 if (response) {
2333 ib_mad_post_receive_mads(qp_info, response);
2334 kfree(recv);
2335 } else
2336 ib_mad_post_receive_mads(qp_info, recv);
2337 }
2338
2339 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2340 {
2341 struct ib_mad_send_wr_private *mad_send_wr;
2342 unsigned long delay;
2343
2344 if (list_empty(&mad_agent_priv->wait_list)) {
2345 cancel_delayed_work(&mad_agent_priv->timed_work);
2346 } else {
2347 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2348 struct ib_mad_send_wr_private,
2349 agent_list);
2350
2351 if (time_after(mad_agent_priv->timeout,
2352 mad_send_wr->timeout)) {
2353 mad_agent_priv->timeout = mad_send_wr->timeout;
2354 delay = mad_send_wr->timeout - jiffies;
2355 if ((long)delay <= 0)
2356 delay = 1;
2357 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2358 &mad_agent_priv->timed_work, delay);
2359 }
2360 }
2361 }
2362
2363 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2364 {
2365 struct ib_mad_agent_private *mad_agent_priv;
2366 struct ib_mad_send_wr_private *temp_mad_send_wr;
2367 struct list_head *list_item;
2368 unsigned long delay;
2369
2370 mad_agent_priv = mad_send_wr->mad_agent_priv;
2371 list_del(&mad_send_wr->agent_list);
2372
2373 delay = mad_send_wr->timeout;
2374 mad_send_wr->timeout += jiffies;
2375
2376 if (delay) {
2377 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2378 temp_mad_send_wr = list_entry(list_item,
2379 struct ib_mad_send_wr_private,
2380 agent_list);
2381 if (time_after(mad_send_wr->timeout,
2382 temp_mad_send_wr->timeout))
2383 break;
2384 }
2385 } else {
2386 list_item = &mad_agent_priv->wait_list;
2387 }
2388 list_add(&mad_send_wr->agent_list, list_item);
2389
2390 /* Reschedule a work item if we have a shorter timeout */
2391 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2392 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2393 &mad_agent_priv->timed_work, delay);
2394 }
2395
2396 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2397 int timeout_ms)
2398 {
2399 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2400 wait_for_response(mad_send_wr);
2401 }
2402
2403 /*
2404 * Process a send work completion
2405 */
2406 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2407 struct ib_mad_send_wc *mad_send_wc)
2408 {
2409 struct ib_mad_agent_private *mad_agent_priv;
2410 unsigned long flags;
2411 int ret;
2412
2413 mad_agent_priv = mad_send_wr->mad_agent_priv;
2414 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2415 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2416 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2417 if (ret == IB_RMPP_RESULT_CONSUMED)
2418 goto done;
2419 } else
2420 ret = IB_RMPP_RESULT_UNHANDLED;
2421
2422 if (mad_send_wc->status != IB_WC_SUCCESS &&
2423 mad_send_wr->status == IB_WC_SUCCESS) {
2424 mad_send_wr->status = mad_send_wc->status;
2425 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2426 }
2427
2428 if (--mad_send_wr->refcount > 0) {
2429 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2430 mad_send_wr->status == IB_WC_SUCCESS) {
2431 wait_for_response(mad_send_wr);
2432 }
2433 goto done;
2434 }
2435
2436 /* Remove send from MAD agent and notify client of completion */
2437 list_del(&mad_send_wr->agent_list);
2438 adjust_timeout(mad_agent_priv);
2439 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2440
2441 if (mad_send_wr->status != IB_WC_SUCCESS)
2442 mad_send_wc->status = mad_send_wr->status;
2443 if (ret == IB_RMPP_RESULT_INTERNAL)
2444 ib_rmpp_send_handler(mad_send_wc);
2445 else
2446 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2447 mad_send_wc);
2448
2449 /* Release reference on agent taken when sending */
2450 deref_mad_agent(mad_agent_priv);
2451 return;
2452 done:
2453 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2454 }
2455
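/*
 * Send completion handler.  Unmaps the send buffers, completes the work
 * request, and, if sends were queued on the overflow list because the send
 * queue was full, posts the next one.
 */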
2456 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
2457 {
2458 struct ib_mad_port_private *port_priv = cq->cq_context;
2459 struct ib_mad_list_head *mad_list =
2460 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2461 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2462 struct ib_mad_qp_info *qp_info;
2463 struct ib_mad_queue *send_queue;
2464 struct ib_send_wr *bad_send_wr;
2465 struct ib_mad_send_wc mad_send_wc;
2466 unsigned long flags;
2467 int ret;
2468
2469 if (list_empty_careful(&port_priv->port_list))
2470 return;
2471
2472 if (wc->status != IB_WC_SUCCESS) {
2473 if (!ib_mad_send_error(port_priv, wc))
2474 return;
2475 }
2476
2477 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2478 mad_list);
2479 send_queue = mad_list->mad_queue;
2480 qp_info = send_queue->qp_info;
2481
2482 retry:
2483 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2484 mad_send_wr->header_mapping,
2485 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2486 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2487 mad_send_wr->payload_mapping,
2488 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2489 queued_send_wr = NULL;
2490 spin_lock_irqsave(&send_queue->lock, flags);
2491 list_del(&mad_list->list);
2492
2493 /* Move queued send to the send queue */
2494 if (send_queue->count-- > send_queue->max_active) {
2495 mad_list = container_of(qp_info->overflow_list.next,
2496 struct ib_mad_list_head, list);
2497 queued_send_wr = container_of(mad_list,
2498 struct ib_mad_send_wr_private,
2499 mad_list);
2500 list_move_tail(&mad_list->list, &send_queue->list);
2501 }
2502 spin_unlock_irqrestore(&send_queue->lock, flags);
2503
2504 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2505 mad_send_wc.status = wc->status;
2506 mad_send_wc.vendor_err = wc->vendor_err;
2507 if (atomic_read(&qp_info->snoop_count))
2508 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2509 IB_MAD_SNOOP_SEND_COMPLETIONS);
2510 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2511
2512 if (queued_send_wr) {
2513 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
2514 &bad_send_wr);
2515 if (ret) {
2516 dev_err(&port_priv->device->dev,
2517 "ib_post_send failed: %d\n", ret);
2518 mad_send_wr = queued_send_wr;
2519 wc->status = IB_WC_LOC_QP_OP_ERR;
2520 goto retry;
2521 }
2522 }
2523 }
2524
2525 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2526 {
2527 struct ib_mad_send_wr_private *mad_send_wr;
2528 struct ib_mad_list_head *mad_list;
2529 unsigned long flags;
2530
2531 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2532 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2533 mad_send_wr = container_of(mad_list,
2534 struct ib_mad_send_wr_private,
2535 mad_list);
2536 mad_send_wr->retry = 1;
2537 }
2538 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2539 }
2540
2541 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2542 struct ib_wc *wc)
2543 {
2544 struct ib_mad_list_head *mad_list =
2545 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2546 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2547 struct ib_mad_send_wr_private *mad_send_wr;
2548 int ret;
2549
2550 /*
2551 * Send errors will transition the QP to SQE - move
2552 * QP to RTS and repost flushed work requests
2553 */
2554 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2555 mad_list);
2556 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2557 if (mad_send_wr->retry) {
2558 /* Repost send */
2559 struct ib_send_wr *bad_send_wr;
2560
2561 mad_send_wr->retry = 0;
2562 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
2563 &bad_send_wr);
2564 if (!ret)
2565 return false;
2566 }
2567 } else {
2568 struct ib_qp_attr *attr;
2569
2570 /* Transition QP to RTS and fail offending send */
2571 attr = kmalloc(sizeof(*attr), GFP_KERNEL);
2572 if (attr) {
2573 attr->qp_state = IB_QPS_RTS;
2574 attr->cur_qp_state = IB_QPS_SQE;
2575 ret = ib_modify_qp(qp_info->qp, attr,
2576 IB_QP_STATE | IB_QP_CUR_STATE);
2577 kfree(attr);
2578 if (ret)
2579 dev_err(&port_priv->device->dev,
2580 "%s - ib_modify_qp to RTS: %d\n",
2581 __func__, ret);
2582 else
2583 mark_sends_for_retry(qp_info);
2584 }
2585 }
2586
2587 return true;
2588 }
2589
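/*
 * Cancel every send an agent still has outstanding: mark active sends as
 * flushed and report all waiting sends to the client with
 * IB_WC_WR_FLUSH_ERR.
 */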
2590 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2591 {
2592 unsigned long flags;
2593 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2594 struct ib_mad_send_wc mad_send_wc;
2595 struct list_head cancel_list;
2596
2597 INIT_LIST_HEAD(&cancel_list);
2598
2599 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2600 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2601 &mad_agent_priv->send_list, agent_list) {
2602 if (mad_send_wr->status == IB_WC_SUCCESS) {
2603 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2604 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2605 }
2606 }
2607
2608 /* Empty wait list to prevent receives from finding a request */
2609 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2610 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2611
2612 /* Report all cancelled requests */
2613 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2614 mad_send_wc.vendor_err = 0;
2615
2616 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2617 &cancel_list, agent_list) {
2618 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2619 list_del(&mad_send_wr->agent_list);
2620 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2621 &mad_send_wc);
2622 atomic_dec(&mad_agent_priv->refcount);
2623 }
2624 }
2625
2626 static struct ib_mad_send_wr_private*
2627 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2628 struct ib_mad_send_buf *send_buf)
2629 {
2630 struct ib_mad_send_wr_private *mad_send_wr;
2631
2632 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2633 agent_list) {
2634 if (&mad_send_wr->send_buf == send_buf)
2635 return mad_send_wr;
2636 }
2637
2638 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2639 agent_list) {
2640 if (is_rmpp_data_mad(mad_agent_priv,
2641 mad_send_wr->send_buf.mad) &&
2642 &mad_send_wr->send_buf == send_buf)
2643 return mad_send_wr;
2644 }
2645 return NULL;
2646 }
2647
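/*
 * Adjust the timeout of an outstanding send, or cancel it entirely when
 * timeout_ms is 0 (this is what ib_cancel_mad() does).  Sends that have
 * already completed or been canceled return -EINVAL.
 */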
2648 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2649 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2650 {
2651 struct ib_mad_agent_private *mad_agent_priv;
2652 struct ib_mad_send_wr_private *mad_send_wr;
2653 unsigned long flags;
2654 int active;
2655
2656 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2657 agent);
2658 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2659 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2660 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2661 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2662 return -EINVAL;
2663 }
2664
2665 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2666 if (!timeout_ms) {
2667 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2668 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2669 }
2670
2671 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2672 if (active)
2673 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2674 else
2675 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2676
2677 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2678 return 0;
2679 }
2680 EXPORT_SYMBOL(ib_modify_mad);
2681
2682 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2683 struct ib_mad_send_buf *send_buf)
2684 {
2685 ib_modify_mad(mad_agent, send_buf, 0);
2686 }
2687 EXPORT_SYMBOL(ib_cancel_mad);
2688
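/*
 * Completion work for MADs that were handled locally on this port rather
 * than posted to the hardware.  The receive side (if any) is delivered
 * first, then the send completion, matching the ordering of real
 * completions.
 */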
2689 static void local_completions(struct work_struct *work)
2690 {
2691 struct ib_mad_agent_private *mad_agent_priv;
2692 struct ib_mad_local_private *local;
2693 struct ib_mad_agent_private *recv_mad_agent;
2694 unsigned long flags;
2695 int free_mad;
2696 struct ib_wc wc;
2697 struct ib_mad_send_wc mad_send_wc;
2698 bool opa;
2699
2700 mad_agent_priv =
2701 container_of(work, struct ib_mad_agent_private, local_work);
2702
2703 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2704 mad_agent_priv->qp_info->port_priv->port_num);
2705
2706 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2707 while (!list_empty(&mad_agent_priv->local_list)) {
2708 local = list_entry(mad_agent_priv->local_list.next,
2709 struct ib_mad_local_private,
2710 completion_list);
2711 list_del(&local->completion_list);
2712 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2713 free_mad = 0;
2714 if (local->mad_priv) {
2715 u8 base_version;
2716 recv_mad_agent = local->recv_mad_agent;
2717 if (!recv_mad_agent) {
2718 dev_err(&mad_agent_priv->agent.device->dev,
2719 "No receive MAD agent for local completion\n");
2720 free_mad = 1;
2721 goto local_send_completion;
2722 }
2723
2724 /*
2725 * Defined behavior is to complete response
2726 * before request
2727 */
2728 build_smp_wc(recv_mad_agent->agent.qp,
2729 local->mad_send_wr->send_wr.wr.wr_cqe,
2730 be16_to_cpu(IB_LID_PERMISSIVE),
2731 local->mad_send_wr->send_wr.pkey_index,
2732 recv_mad_agent->agent.port_num, &wc);
2733
2734 local->mad_priv->header.recv_wc.wc = &wc;
2735
2736 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2737 if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2738 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2739 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2740 } else {
2741 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2742 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2743 }
2744
2745 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2746 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2747 &local->mad_priv->header.recv_wc.rmpp_list);
2748 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2749 local->mad_priv->header.recv_wc.recv_buf.mad =
2750 (struct ib_mad *)local->mad_priv->mad;
2751 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2752 snoop_recv(recv_mad_agent->qp_info,
2753 &local->mad_priv->header.recv_wc,
2754 IB_MAD_SNOOP_RECVS);
2755 recv_mad_agent->agent.recv_handler(
2756 &recv_mad_agent->agent,
2757 &local->mad_send_wr->send_buf,
2758 &local->mad_priv->header.recv_wc);
2759 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2760 atomic_dec(&recv_mad_agent->refcount);
2761 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2762 }
2763
2764 local_send_completion:
2765 /* Complete send */
2766 mad_send_wc.status = IB_WC_SUCCESS;
2767 mad_send_wc.vendor_err = 0;
2768 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2769 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2770 snoop_send(mad_agent_priv->qp_info,
2771 &local->mad_send_wr->send_buf,
2772 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2773 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2774 &mad_send_wc);
2775
2776 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2777 atomic_dec(&mad_agent_priv->refcount);
2778 if (free_mad)
2779 kfree(local->mad_priv);
2780 kfree(local);
2781 }
2782 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2783 }
2784
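/*
 * Resend a timed-out MAD if it has retries left; RMPP sends go through
 * ib_retry_rmpp() first.  On success the send is placed back on the agent's
 * send list with an extra reference.
 */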
2785 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2786 {
2787 int ret;
2788
2789 if (!mad_send_wr->retries_left)
2790 return -ETIMEDOUT;
2791
2792 mad_send_wr->retries_left--;
2793 mad_send_wr->send_buf.retries++;
2794
2795 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2796
2797 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2798 ret = ib_retry_rmpp(mad_send_wr);
2799 switch (ret) {
2800 case IB_RMPP_RESULT_UNHANDLED:
2801 ret = ib_send_mad(mad_send_wr);
2802 break;
2803 case IB_RMPP_RESULT_CONSUMED:
2804 ret = 0;
2805 break;
2806 default:
2807 ret = -ECOMM;
2808 break;
2809 }
2810 } else
2811 ret = ib_send_mad(mad_send_wr);
2812
2813 if (!ret) {
2814 mad_send_wr->refcount++;
2815 list_add_tail(&mad_send_wr->agent_list,
2816 &mad_send_wr->mad_agent_priv->send_list);
2817 }
2818 return ret;
2819 }
2820
2821 static void timeout_sends(struct work_struct *work)
2822 {
2823 struct ib_mad_agent_private *mad_agent_priv;
2824 struct ib_mad_send_wr_private *mad_send_wr;
2825 struct ib_mad_send_wc mad_send_wc;
2826 unsigned long flags, delay;
2827
2828 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2829 timed_work.work);
2830 mad_send_wc.vendor_err = 0;
2831
2832 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2833 while (!list_empty(&mad_agent_priv->wait_list)) {
2834 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2835 struct ib_mad_send_wr_private,
2836 agent_list);
2837
2838 if (time_after(mad_send_wr->timeout, jiffies)) {
2839 delay = mad_send_wr->timeout - jiffies;
2840 if ((long)delay <= 0)
2841 delay = 1;
2842 queue_delayed_work(mad_agent_priv->qp_info->
2843 port_priv->wq,
2844 &mad_agent_priv->timed_work, delay);
2845 break;
2846 }
2847
2848 list_del(&mad_send_wr->agent_list);
2849 if (mad_send_wr->status == IB_WC_SUCCESS &&
2850 !retry_send(mad_send_wr))
2851 continue;
2852
2853 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2854
2855 if (mad_send_wr->status == IB_WC_SUCCESS)
2856 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2857 else
2858 mad_send_wc.status = mad_send_wr->status;
2859 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2860 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2861 &mad_send_wc);
2862
2863 atomic_dec(&mad_agent_priv->refcount);
2864 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2865 }
2866 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2867 }
2868
2869 /*
2870 * Allocate receive MADs and post receive WRs for them
2871 */
2872 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2873 struct ib_mad_private *mad)
2874 {
2875 unsigned long flags;
2876 int post, ret;
2877 struct ib_mad_private *mad_priv;
2878 struct ib_sge sg_list;
2879 struct ib_recv_wr recv_wr, *bad_recv_wr;
2880 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2881
2882 /* Initialize common scatter list fields */
2883 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2884
2885 /* Initialize common receive WR fields */
2886 recv_wr.next = NULL;
2887 recv_wr.sg_list = &sg_list;
2888 recv_wr.num_sge = 1;
2889
2890 do {
2891 /* Allocate and map receive buffer */
2892 if (mad) {
2893 mad_priv = mad;
2894 mad = NULL;
2895 } else {
2896 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2897 GFP_ATOMIC);
2898 if (!mad_priv) {
2899 ret = -ENOMEM;
2900 break;
2901 }
2902 }
2903 sg_list.length = mad_priv_dma_size(mad_priv);
2904 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2905 &mad_priv->grh,
2906 mad_priv_dma_size(mad_priv),
2907 DMA_FROM_DEVICE);
2908 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2909 sg_list.addr))) {
2910 ret = -ENOMEM;
2911 break;
2912 }
2913 mad_priv->header.mapping = sg_list.addr;
2914 mad_priv->header.mad_list.mad_queue = recv_queue;
2915 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2916 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2917
2918 /* Post receive WR */
2919 spin_lock_irqsave(&recv_queue->lock, flags);
2920 post = (++recv_queue->count < recv_queue->max_active);
2921 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2922 spin_unlock_irqrestore(&recv_queue->lock, flags);
2923 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2924 if (ret) {
2925 spin_lock_irqsave(&recv_queue->lock, flags);
2926 list_del(&mad_priv->header.mad_list.list);
2927 recv_queue->count--;
2928 spin_unlock_irqrestore(&recv_queue->lock, flags);
2929 ib_dma_unmap_single(qp_info->port_priv->device,
2930 mad_priv->header.mapping,
2931 mad_priv_dma_size(mad_priv),
2932 DMA_FROM_DEVICE);
2933 kfree(mad_priv);
2934 dev_err(&qp_info->port_priv->device->dev,
2935 "ib_post_recv failed: %d\n", ret);
2936 break;
2937 }
2938 } while (post);
2939
2940 return ret;
2941 }
2942
2943 /*
2944 * Return all the posted receive MADs
2945 */
2946 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2947 {
2948 struct ib_mad_private_header *mad_priv_hdr;
2949 struct ib_mad_private *recv;
2950 struct ib_mad_list_head *mad_list;
2951
2952 if (!qp_info->qp)
2953 return;
2954
2955 while (!list_empty(&qp_info->recv_queue.list)) {
2956
2957 mad_list = list_entry(qp_info->recv_queue.list.next,
2958 struct ib_mad_list_head, list);
2959 mad_priv_hdr = container_of(mad_list,
2960 struct ib_mad_private_header,
2961 mad_list);
2962 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2963 header);
2964
2965 /* Remove from posted receive MAD list */
2966 list_del(&mad_list->list);
2967
2968 ib_dma_unmap_single(qp_info->port_priv->device,
2969 recv->header.mapping,
2970 mad_priv_dma_size(recv),
2971 DMA_FROM_DEVICE);
2972 kfree(recv);
2973 }
2974
2975 qp_info->recv_queue.count = 0;
2976 }
2977
2978 /*
2979 * Start the port
2980 */
2981 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2982 {
2983 int ret, i;
2984 struct ib_qp_attr *attr;
2985 struct ib_qp *qp;
2986 u16 pkey_index;
2987
2988 attr = kmalloc(sizeof(*attr), GFP_KERNEL);
2989 if (!attr)
2990 return -ENOMEM;
2991
2992 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2993 IB_DEFAULT_PKEY_FULL, &pkey_index);
2994 if (ret)
2995 pkey_index = 0;
2996
2997 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2998 qp = port_priv->qp_info[i].qp;
2999 if (!qp)
3000 continue;
3001
3002 /*
3003 * PKey index for QP1 is irrelevant but
3004 * one is needed for the Reset to Init transition
3005 */
3006 attr->qp_state = IB_QPS_INIT;
3007 attr->pkey_index = pkey_index;
3008 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
3009 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
3010 IB_QP_PKEY_INDEX | IB_QP_QKEY);
3011 if (ret) {
3012 dev_err(&port_priv->device->dev,
3013 "Couldn't change QP%d state to INIT: %d\n",
3014 i, ret);
3015 goto out;
3016 }
3017
3018 attr->qp_state = IB_QPS_RTR;
3019 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
3020 if (ret) {
3021 dev_err(&port_priv->device->dev,
3022 "Couldn't change QP%d state to RTR: %d\n",
3023 i, ret);
3024 goto out;
3025 }
3026
3027 attr->qp_state = IB_QPS_RTS;
3028 attr->sq_psn = IB_MAD_SEND_Q_PSN;
3029 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
3030 if (ret) {
3031 dev_err(&port_priv->device->dev,
3032 "Couldn't change QP%d state to RTS: %d\n",
3033 i, ret);
3034 goto out;
3035 }
3036 }
3037
3038 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3039 if (ret) {
3040 dev_err(&port_priv->device->dev,
3041 "Failed to request completion notification: %d\n",
3042 ret);
3043 goto out;
3044 }
3045
3046 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3047 if (!port_priv->qp_info[i].qp)
3048 continue;
3049
3050 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3051 if (ret) {
3052 dev_err(&port_priv->device->dev,
3053 "Couldn't post receive WRs\n");
3054 goto out;
3055 }
3056 }
3057 out:
3058 kfree(attr);
3059 return ret;
3060 }
3061
3062 static void qp_event_handler(struct ib_event *event, void *qp_context)
3063 {
3064 struct ib_mad_qp_info *qp_info = qp_context;
3065
3066 /* It's worse than that! He's dead, Jim! */
3067 dev_err(&qp_info->port_priv->device->dev,
3068 "Fatal error (%d) on MAD QP (%d)\n",
3069 event->event, qp_info->qp->qp_num);
3070 }
3071
3072 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3073 struct ib_mad_queue *mad_queue)
3074 {
3075 mad_queue->qp_info = qp_info;
3076 mad_queue->count = 0;
3077 spin_lock_init(&mad_queue->lock);
3078 INIT_LIST_HEAD(&mad_queue->list);
3079 }
3080
3081 static void init_mad_qp(struct ib_mad_port_private *port_priv,
3082 struct ib_mad_qp_info *qp_info)
3083 {
3084 qp_info->port_priv = port_priv;
3085 init_mad_queue(qp_info, &qp_info->send_queue);
3086 init_mad_queue(qp_info, &qp_info->recv_queue);
3087 INIT_LIST_HEAD(&qp_info->overflow_list);
3088 spin_lock_init(&qp_info->snoop_lock);
3089 qp_info->snoop_table = NULL;
3090 qp_info->snoop_table_size = 0;
3091 atomic_set(&qp_info->snoop_count, 0);
3092 }
3093
3094 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3095 enum ib_qp_type qp_type)
3096 {
3097 struct ib_qp_init_attr qp_init_attr;
3098 int ret;
3099
3100 memset(&qp_init_attr, 0, sizeof qp_init_attr);
3101 qp_init_attr.send_cq = qp_info->port_priv->cq;
3102 qp_init_attr.recv_cq = qp_info->port_priv->cq;
3103 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3104 qp_init_attr.cap.max_send_wr = mad_sendq_size;
3105 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3106 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3107 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3108 qp_init_attr.qp_type = qp_type;
3109 qp_init_attr.port_num = qp_info->port_priv->port_num;
3110 qp_init_attr.qp_context = qp_info;
3111 qp_init_attr.event_handler = qp_event_handler;
3112 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3113 if (IS_ERR(qp_info->qp)) {
3114 dev_err(&qp_info->port_priv->device->dev,
3115 "Couldn't create ib_mad QP%d\n",
3116 get_spl_qp_index(qp_type));
3117 ret = PTR_ERR(qp_info->qp);
3118 goto error;
3119 }
3120 /* Use minimum queue sizes unless the CQ is resized */
3121 qp_info->send_queue.max_active = mad_sendq_size;
3122 qp_info->recv_queue.max_active = mad_recvq_size;
3123 return 0;
3124
3125 error:
3126 return ret;
3127 }
3128
3129 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3130 {
3131 if (!qp_info->qp)
3132 return;
3133
3134 ib_destroy_qp(qp_info->qp);
3135 kfree(qp_info->snoop_table);
3136 }
3137
3138 /*
3139 * Open the port
3140 * Create the QP, PD, MR, and CQ if needed
3141 */
3142 static int ib_mad_port_open(struct ib_device *device,
3143 int port_num)
3144 {
3145 int ret, cq_size;
3146 struct ib_mad_port_private *port_priv;
3147 unsigned long flags;
3148 char name[sizeof "ib_mad123"];
3149 int has_smi;
3150
3151 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3152 return -EFAULT;
3153
3154 if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3155 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3156 return -EFAULT;
3157
3158 /* Create new device info */
3159 port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
3160 if (!port_priv)
3161 return -ENOMEM;
3162
3163 port_priv->device = device;
3164 port_priv->port_num = port_num;
3165 spin_lock_init(&port_priv->reg_lock);
3166 INIT_LIST_HEAD(&port_priv->agent_list);
3167 init_mad_qp(port_priv, &port_priv->qp_info[0]);
3168 init_mad_qp(port_priv, &port_priv->qp_info[1]);
3169
3170 cq_size = mad_sendq_size + mad_recvq_size;
3171 has_smi = rdma_cap_ib_smi(device, port_num);
3172 if (has_smi)
3173 cq_size *= 2;
3174
3175 port_priv->pd = ib_alloc_pd(device, 0);
3176 if (IS_ERR(port_priv->pd)) {
3177 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3178 ret = PTR_ERR(port_priv->pd);
3179 goto error3;
3180 }
3181
3182 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3183 IB_POLL_UNBOUND_WORKQUEUE);
3184 if (IS_ERR(port_priv->cq)) {
3185 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3186 ret = PTR_ERR(port_priv->cq);
3187 goto error4;
3188 }
3189
3190 if (has_smi) {
3191 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3192 if (ret)
3193 goto error6;
3194 }
3195 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3196 if (ret)
3197 goto error7;
3198
3199 snprintf(name, sizeof name, "ib_mad%d", port_num);
3200 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
3201 if (!port_priv->wq) {
3202 ret = -ENOMEM;
3203 goto error8;
3204 }
3205
3206 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3207 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3208 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3209
3210 ret = ib_mad_port_start(port_priv);
3211 if (ret) {
3212 dev_err(&device->dev, "Couldn't start port\n");
3213 goto error9;
3214 }
3215
3216 return 0;
3217
3218 error9:
3219 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3220 list_del_init(&port_priv->port_list);
3221 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3222
3223 destroy_workqueue(port_priv->wq);
3224 error8:
3225 destroy_mad_qp(&port_priv->qp_info[1]);
3226 error7:
3227 destroy_mad_qp(&port_priv->qp_info[0]);
3228 error6:
3229 ib_free_cq(port_priv->cq);
3230 cleanup_recv_queue(&port_priv->qp_info[1]);
3231 cleanup_recv_queue(&port_priv->qp_info[0]);
3232 error4:
3233 ib_dealloc_pd(port_priv->pd);
3234 error3:
3235 kfree(port_priv);
3236
3237 return ret;
3238 }
3239
3240 /*
3241 * Close the port
3242 * If there are no classes using the port, free the port
3243 * resources (CQ, MR, PD, QP) and remove the port's info structure
3244 */
3245 static int ib_mad_port_close(struct ib_device *device, int port_num)
3246 {
3247 struct ib_mad_port_private *port_priv;
3248 unsigned long flags;
3249
3250 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3251 port_priv = __ib_get_mad_port(device, port_num);
3252 if (port_priv == NULL) {
3253 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3254 dev_err(&device->dev, "Port %d not found\n", port_num);
3255 return -ENODEV;
3256 }
3257 list_del_init(&port_priv->port_list);
3258 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3259
3260 destroy_workqueue(port_priv->wq);
3261 destroy_mad_qp(&port_priv->qp_info[1]);
3262 destroy_mad_qp(&port_priv->qp_info[0]);
3263 ib_free_cq(port_priv->cq);
3264 ib_dealloc_pd(port_priv->pd);
3265 cleanup_recv_queue(&port_priv->qp_info[1]);
3266 cleanup_recv_queue(&port_priv->qp_info[0]);
3267 /* XXX: Handle deallocation of MAD registration tables */
3268
3269 kfree(port_priv);
3270
3271 return 0;
3272 }
3273
3274 static void ib_mad_init_device(struct ib_device *device)
3275 {
3276 int start, i;
3277
3278 start = rdma_start_port(device);
3279
3280 for (i = start; i <= rdma_end_port(device); i++) {
3281 if (!rdma_cap_ib_mad(device, i))
3282 continue;
3283
3284 if (ib_mad_port_open(device, i)) {
3285 dev_err(&device->dev, "Couldn't open port %d\n", i);
3286 goto error;
3287 }
3288 if (ib_agent_port_open(device, i)) {
3289 dev_err(&device->dev,
3290 "Couldn't open port %d for agents\n", i);
3291 goto error_agent;
3292 }
3293 }
3294 return;
3295
3296 error_agent:
3297 if (ib_mad_port_close(device, i))
3298 dev_err(&device->dev, "Couldn't close port %d\n", i);
3299
3300 error:
3301 while (--i >= start) {
3302 if (!rdma_cap_ib_mad(device, i))
3303 continue;
3304
3305 if (ib_agent_port_close(device, i))
3306 dev_err(&device->dev,
3307 "Couldn't close port %d for agents\n", i);
3308 if (ib_mad_port_close(device, i))
3309 dev_err(&device->dev, "Couldn't close port %d\n", i);
3310 }
3311 }
3312
3313 static void ib_mad_remove_device(struct ib_device *device, void *client_data)
3314 {
3315 int i;
3316
3317 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
3318 if (!rdma_cap_ib_mad(device, i))
3319 continue;
3320
3321 if (ib_agent_port_close(device, i))
3322 dev_err(&device->dev,
3323 "Couldn't close port %d for agents\n", i);
3324 if (ib_mad_port_close(device, i))
3325 dev_err(&device->dev, "Couldn't close port %d\n", i);
3326 }
3327 }
3328
3329 static struct ib_client mad_client = {
3330 .name = "mad",
3331 .add = ib_mad_init_device,
3332 .remove = ib_mad_remove_device
3333 };
3334
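/*
 * Module init: clamp the configured send/receive queue sizes to the
 * supported range and register the "mad" client with the RDMA core so
 * ports are set up as devices appear.
 */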
3335 int ib_mad_init(void)
3336 {
3337 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3338 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3339
3340 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3341 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3342
3343 INIT_LIST_HEAD(&ib_mad_port_list);
3344
3345 if (ib_register_client(&mad_client)) {
3346 pr_err("Couldn't register ib_mad client\n");
3347 return -EINVAL;
3348 }
3349
3350 return 0;
3351 }
3352
3353 void ib_mad_cleanup(void)
3354 {
3355 ib_unregister_client(&mad_client);
3356 }