[PATCH] IB: Introduce RMPP APIs
1/*
2 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: mad.c 1389 2004-12-27 22:56:47Z roland $
33 */
34
35#include <linux/dma-mapping.h>
36
37#include "mad_priv.h"
38#include "smi.h"
39#include "agent.h"
40
41MODULE_LICENSE("Dual BSD/GPL");
42MODULE_DESCRIPTION("kernel IB MAD API");
43MODULE_AUTHOR("Hal Rosenstock");
44MODULE_AUTHOR("Sean Hefty");
45
46
47kmem_cache_t *ib_mad_cache;
48static struct list_head ib_mad_port_list;
49static u32 ib_mad_client_id = 0;
50
51/* Port list lock */
52static spinlock_t ib_mad_port_list_lock;
53
54
55/* Forward declarations */
56static int method_in_use(struct ib_mad_mgmt_method_table **method,
57 struct ib_mad_reg_req *mad_reg_req);
58static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
59static struct ib_mad_agent_private *find_mad_agent(
60 struct ib_mad_port_private *port_priv,
61 struct ib_mad *mad);
62static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
63 struct ib_mad_private *mad);
64static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
65static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
66 struct ib_mad_send_wc *mad_send_wc);
67static void timeout_sends(void *data);
68static void local_completions(void *data);
69static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
70 struct ib_mad_agent_private *agent_priv,
71 u8 mgmt_class);
72static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
73 struct ib_mad_agent_private *agent_priv);
74
75/*
76 * Returns an ib_mad_port_private structure or NULL for a device/port
77 * Assumes ib_mad_port_list_lock is being held
78 */
79static inline struct ib_mad_port_private *
80__ib_get_mad_port(struct ib_device *device, int port_num)
81{
82 struct ib_mad_port_private *entry;
83
84 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
85 if (entry->device == device && entry->port_num == port_num)
86 return entry;
87 }
88 return NULL;
89}
90
91/*
92 * Wrapper function to return an ib_mad_port_private structure or NULL
93 * for a device/port
94 */
95static inline struct ib_mad_port_private *
96ib_get_mad_port(struct ib_device *device, int port_num)
97{
98 struct ib_mad_port_private *entry;
99 unsigned long flags;
100
101 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
102 entry = __ib_get_mad_port(device, port_num);
103 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
104
105 return entry;
106}
107
108static inline u8 convert_mgmt_class(u8 mgmt_class)
109{
110 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
111 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
112 0 : mgmt_class;
113}
114
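/*
 * Map a special QP type to its index in a port's qp_info[] array:
 * SMI (QP0) -> 0, GSI (QP1) -> 1, anything else is rejected with -1.
 */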
115static int get_spl_qp_index(enum ib_qp_type qp_type)
116{
117 switch (qp_type)
118 {
119 case IB_QPT_SMI:
120 return 0;
121 case IB_QPT_GSI:
122 return 1;
123 default:
124 return -1;
125 }
126}
127
128static int vendor_class_index(u8 mgmt_class)
129{
130 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
131}
132
133static int is_vendor_class(u8 mgmt_class)
134{
135 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
136 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
137 return 0;
138 return 1;
139}
140
141static int is_vendor_oui(char *oui)
142{
143 if (oui[0] || oui[1] || oui[2])
144 return 1;
145 return 0;
146}
147
148static int is_vendor_method_in_use(
149 struct ib_mad_mgmt_vendor_class *vendor_class,
150 struct ib_mad_reg_req *mad_reg_req)
151{
152 struct ib_mad_mgmt_method_table *method;
153 int i;
154
155 for (i = 0; i < MAX_MGMT_OUI; i++) {
156 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
157 method = vendor_class->method_table[i];
158 if (method) {
159 if (method_in_use(&method, mad_reg_req))
160 return 1;
161 else
162 break;
163 }
164 }
165 }
166 return 0;
167}
168
169/*
170 * ib_register_mad_agent - Register to send/receive MADs
171 */
172struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
173 u8 port_num,
174 enum ib_qp_type qp_type,
175 struct ib_mad_reg_req *mad_reg_req,
176 u8 rmpp_version,
177 ib_mad_send_handler send_handler,
178 ib_mad_recv_handler recv_handler,
179 void *context)
180{
181 struct ib_mad_port_private *port_priv;
182 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
183 struct ib_mad_agent_private *mad_agent_priv;
184 struct ib_mad_reg_req *reg_req = NULL;
185 struct ib_mad_mgmt_class_table *class;
186 struct ib_mad_mgmt_vendor_class_table *vendor;
187 struct ib_mad_mgmt_vendor_class *vendor_class;
188 struct ib_mad_mgmt_method_table *method;
189 int ret2, qpn;
190 unsigned long flags;
191 u8 mgmt_class, vclass;
192
193 /* Validate parameters */
194 qpn = get_spl_qp_index(qp_type);
195 if (qpn == -1)
196 goto error1;
197
198 if (rmpp_version)
199 goto error1; /* XXX: until RMPP implemented */
200
201 /* Validate MAD registration request if supplied */
202 if (mad_reg_req) {
203 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
204 goto error1;
205 if (!recv_handler)
206 goto error1;
207 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
208 /*
209 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
210 * one in this range currently allowed
211 */
212 if (mad_reg_req->mgmt_class !=
213 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
214 goto error1;
215 } else if (mad_reg_req->mgmt_class == 0) {
216 /*
217 * Class 0 is reserved in IBA and is used for
218 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
219 */
220 goto error1;
221 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
222 /*
223 * If class is in "new" vendor range,
224 * ensure supplied OUI is not zero
225 */
226 if (!is_vendor_oui(mad_reg_req->oui))
227 goto error1;
228 }
229 /* Make sure class supplied is consistent with QP type */
230 if (qp_type == IB_QPT_SMI) {
231 if ((mad_reg_req->mgmt_class !=
232 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
233 (mad_reg_req->mgmt_class !=
234 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
235 goto error1;
236 } else {
237 if ((mad_reg_req->mgmt_class ==
238 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
239 (mad_reg_req->mgmt_class ==
240 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
241 goto error1;
242 }
243 } else {
244 /* No registration request supplied */
245 if (!send_handler)
246 goto error1;
247 }
248
249 /* Validate device and port */
250 port_priv = ib_get_mad_port(device, port_num);
251 if (!port_priv) {
252 ret = ERR_PTR(-ENODEV);
253 goto error1;
254 }
255
256 /* Allocate structures */
257 mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
258 if (!mad_agent_priv) {
259 ret = ERR_PTR(-ENOMEM);
260 goto error1;
261 }
262 memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
263
264 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
265 IB_ACCESS_LOCAL_WRITE);
266 if (IS_ERR(mad_agent_priv->agent.mr)) {
267 ret = ERR_PTR(-ENOMEM);
268 goto error2;
269 }
270
271 if (mad_reg_req) {
272 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
273 if (!reg_req) {
274 ret = ERR_PTR(-ENOMEM);
275 goto error3;
276 }
277 /* Make a copy of the MAD registration request */
278 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
279 }
280
281 /* Now, fill in the various structures */
282 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
283 mad_agent_priv->reg_req = reg_req;
284 mad_agent_priv->rmpp_version = rmpp_version;
285 mad_agent_priv->agent.device = device;
286 mad_agent_priv->agent.recv_handler = recv_handler;
287 mad_agent_priv->agent.send_handler = send_handler;
288 mad_agent_priv->agent.context = context;
289 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
290 mad_agent_priv->agent.port_num = port_num;
291
292 spin_lock_irqsave(&port_priv->reg_lock, flags);
293 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
294
295 /*
296 * Make sure the MAD registration (if supplied)
297 * does not overlap with any existing registrations
298 */
299 if (mad_reg_req) {
300 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
301 if (!is_vendor_class(mgmt_class)) {
302 class = port_priv->version[mad_reg_req->
303 mgmt_class_version].class;
304 if (class) {
305 method = class->method_table[mgmt_class];
306 if (method) {
307 if (method_in_use(&method,
308 mad_reg_req))
309 goto error4;
310 }
311 }
312 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
313 mgmt_class);
314 } else {
315 /* "New" vendor class range */
316 vendor = port_priv->version[mad_reg_req->
317 mgmt_class_version].vendor;
318 if (vendor) {
319 vclass = vendor_class_index(mgmt_class);
320 vendor_class = vendor->vendor_class[vclass];
321 if (vendor_class) {
322 if (is_vendor_method_in_use(
323 vendor_class,
324 mad_reg_req))
325 goto error4;
326 }
327 }
328 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
329 }
330 if (ret2) {
331 ret = ERR_PTR(ret2);
332 goto error4;
333 }
334 }
335
336 /* Add mad agent into port's agent list */
337 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
338 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
339
340 spin_lock_init(&mad_agent_priv->lock);
341 INIT_LIST_HEAD(&mad_agent_priv->send_list);
342 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
343 INIT_LIST_HEAD(&mad_agent_priv->done_list);
344 INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
345 INIT_LIST_HEAD(&mad_agent_priv->local_list);
346 INIT_WORK(&mad_agent_priv->local_work, local_completions,
347 mad_agent_priv);
348 atomic_set(&mad_agent_priv->refcount, 1);
349 init_waitqueue_head(&mad_agent_priv->wait);
350
351 return &mad_agent_priv->agent;
352
353error4:
354 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
355 kfree(reg_req);
356error3:
357 ib_dereg_mr(mad_agent_priv->agent.mr);
358error2:
359 kfree(mad_agent_priv);
360error1:
361 return ret;
362}
363EXPORT_SYMBOL(ib_register_mad_agent);
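/*
 * Illustrative sketch (not part of the original source): a client wanting to
 * exchange GSI MADs could register roughly as follows.  "my_send_handler",
 * "my_recv_handler" and "my_context" are hypothetical caller-supplied names;
 * "device" and "port_num" identify the HCA port.
 *
 *	struct ib_mad_agent *agent;
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      NULL, 0, my_send_handler,
 *				      my_recv_handler, my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *	...
 *	ib_unregister_mad_agent(agent);
 */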
364
365static inline int is_snooping_sends(int mad_snoop_flags)
366{
367 return (mad_snoop_flags &
368 (/*IB_MAD_SNOOP_POSTED_SENDS |
369 IB_MAD_SNOOP_RMPP_SENDS |*/
370 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
371 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
372}
373
374static inline int is_snooping_recvs(int mad_snoop_flags)
375{
376 return (mad_snoop_flags &
377 (IB_MAD_SNOOP_RECVS /*|
378 IB_MAD_SNOOP_RMPP_RECVS*/));
379}
380
381static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
382 struct ib_mad_snoop_private *mad_snoop_priv)
383{
384 struct ib_mad_snoop_private **new_snoop_table;
385 unsigned long flags;
386 int i;
387
388 spin_lock_irqsave(&qp_info->snoop_lock, flags);
389 /* Check for empty slot in array. */
390 for (i = 0; i < qp_info->snoop_table_size; i++)
391 if (!qp_info->snoop_table[i])
392 break;
393
394 if (i == qp_info->snoop_table_size) {
395 /* Grow table. */
396 new_snoop_table = kmalloc(sizeof mad_snoop_priv *
397 (qp_info->snoop_table_size + 1),
398 GFP_ATOMIC);
399 if (!new_snoop_table) {
400 i = -ENOMEM;
401 goto out;
402 }
403 if (qp_info->snoop_table) {
404 memcpy(new_snoop_table, qp_info->snoop_table,
405 sizeof mad_snoop_priv *
406 qp_info->snoop_table_size);
407 kfree(qp_info->snoop_table);
408 }
409 qp_info->snoop_table = new_snoop_table;
410 qp_info->snoop_table_size++;
411 }
412 qp_info->snoop_table[i] = mad_snoop_priv;
413 atomic_inc(&qp_info->snoop_count);
414out:
415 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
416 return i;
417}
418
419struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
420 u8 port_num,
421 enum ib_qp_type qp_type,
422 int mad_snoop_flags,
423 ib_mad_snoop_handler snoop_handler,
424 ib_mad_recv_handler recv_handler,
425 void *context)
426{
427 struct ib_mad_port_private *port_priv;
428 struct ib_mad_agent *ret;
429 struct ib_mad_snoop_private *mad_snoop_priv;
430 int qpn;
431
432 /* Validate parameters */
433 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
434 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
435 ret = ERR_PTR(-EINVAL);
436 goto error1;
437 }
438 qpn = get_spl_qp_index(qp_type);
439 if (qpn == -1) {
440 ret = ERR_PTR(-EINVAL);
441 goto error1;
442 }
443 port_priv = ib_get_mad_port(device, port_num);
444 if (!port_priv) {
445 ret = ERR_PTR(-ENODEV);
446 goto error1;
447 }
448 /* Allocate structures */
449 mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
450 if (!mad_snoop_priv) {
451 ret = ERR_PTR(-ENOMEM);
452 goto error1;
453 }
454
455 /* Now, fill in the various structures */
456 memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
457 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
458 mad_snoop_priv->agent.device = device;
459 mad_snoop_priv->agent.recv_handler = recv_handler;
460 mad_snoop_priv->agent.snoop_handler = snoop_handler;
461 mad_snoop_priv->agent.context = context;
462 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
463 mad_snoop_priv->agent.port_num = port_num;
464 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
465 init_waitqueue_head(&mad_snoop_priv->wait);
466 mad_snoop_priv->snoop_index = register_snoop_agent(
467 &port_priv->qp_info[qpn],
468 mad_snoop_priv);
469 if (mad_snoop_priv->snoop_index < 0) {
470 ret = ERR_PTR(mad_snoop_priv->snoop_index);
471 goto error2;
472 }
473
474 atomic_set(&mad_snoop_priv->refcount, 1);
475 return &mad_snoop_priv->agent;
476
477error2:
478 kfree(mad_snoop_priv);
479error1:
480 return ret;
481}
482EXPORT_SYMBOL(ib_register_mad_snoop);
483
484static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
485{
486 struct ib_mad_port_private *port_priv;
487 unsigned long flags;
488
489 /* Note that we could still be handling received MADs */
490
491 /*
492 * Canceling all sends results in dropping received response
493 * MADs, preventing us from queuing additional work
494 */
495 cancel_mads(mad_agent_priv);
496 port_priv = mad_agent_priv->qp_info->port_priv;
497 cancel_delayed_work(&mad_agent_priv->timed_work);
498
499 spin_lock_irqsave(&port_priv->reg_lock, flags);
500 remove_mad_reg_req(mad_agent_priv);
501 list_del(&mad_agent_priv->agent_list);
502 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
503
504 flush_workqueue(port_priv->wq);
505
506 atomic_dec(&mad_agent_priv->refcount);
507 wait_event(mad_agent_priv->wait,
508 !atomic_read(&mad_agent_priv->refcount));
509
510 if (mad_agent_priv->reg_req)
511 kfree(mad_agent_priv->reg_req);
512 ib_dereg_mr(mad_agent_priv->agent.mr);
513 kfree(mad_agent_priv);
514}
515
516static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
517{
518 struct ib_mad_qp_info *qp_info;
519 unsigned long flags;
520
521 qp_info = mad_snoop_priv->qp_info;
522 spin_lock_irqsave(&qp_info->snoop_lock, flags);
523 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
524 atomic_dec(&qp_info->snoop_count);
525 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
526
527 atomic_dec(&mad_snoop_priv->refcount);
528 wait_event(mad_snoop_priv->wait,
529 !atomic_read(&mad_snoop_priv->refcount));
530
531 kfree(mad_snoop_priv);
532}
533
534/*
535 * ib_unregister_mad_agent - Unregisters a client from using MAD services
536 */
537int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
538{
539 struct ib_mad_agent_private *mad_agent_priv;
540 struct ib_mad_snoop_private *mad_snoop_priv;
541
542 /* If the TID is zero, the agent can only snoop. */
543 if (mad_agent->hi_tid) {
544 mad_agent_priv = container_of(mad_agent,
545 struct ib_mad_agent_private,
546 agent);
547 unregister_mad_agent(mad_agent_priv);
548 } else {
549 mad_snoop_priv = container_of(mad_agent,
550 struct ib_mad_snoop_private,
551 agent);
552 unregister_mad_snoop(mad_snoop_priv);
553 }
554 return 0;
555}
556EXPORT_SYMBOL(ib_unregister_mad_agent);
557
558static inline int response_mad(struct ib_mad *mad)
559{
560 /* Trap represses are responses although response bit is reset */
561 return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
562 (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
563}
564
565static void dequeue_mad(struct ib_mad_list_head *mad_list)
566{
567 struct ib_mad_queue *mad_queue;
568 unsigned long flags;
569
570 BUG_ON(!mad_list->mad_queue);
571 mad_queue = mad_list->mad_queue;
572 spin_lock_irqsave(&mad_queue->lock, flags);
573 list_del(&mad_list->list);
574 mad_queue->count--;
575 spin_unlock_irqrestore(&mad_queue->lock, flags);
576}
577
578static void snoop_send(struct ib_mad_qp_info *qp_info,
579 struct ib_send_wr *send_wr,
580 struct ib_mad_send_wc *mad_send_wc,
581 int mad_snoop_flags)
582{
583 struct ib_mad_snoop_private *mad_snoop_priv;
584 unsigned long flags;
585 int i;
586
587 spin_lock_irqsave(&qp_info->snoop_lock, flags);
588 for (i = 0; i < qp_info->snoop_table_size; i++) {
589 mad_snoop_priv = qp_info->snoop_table[i];
590 if (!mad_snoop_priv ||
591 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
592 continue;
593
594 atomic_inc(&mad_snoop_priv->refcount);
595 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
596 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
597 send_wr, mad_send_wc);
598 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
599 wake_up(&mad_snoop_priv->wait);
600 spin_lock_irqsave(&qp_info->snoop_lock, flags);
601 }
602 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
603}
604
605static void snoop_recv(struct ib_mad_qp_info *qp_info,
606 struct ib_mad_recv_wc *mad_recv_wc,
607 int mad_snoop_flags)
608{
609 struct ib_mad_snoop_private *mad_snoop_priv;
610 unsigned long flags;
611 int i;
612
613 spin_lock_irqsave(&qp_info->snoop_lock, flags);
614 for (i = 0; i < qp_info->snoop_table_size; i++) {
615 mad_snoop_priv = qp_info->snoop_table[i];
616 if (!mad_snoop_priv ||
617 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
618 continue;
619
620 atomic_inc(&mad_snoop_priv->refcount);
621 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
622 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
623 mad_recv_wc);
624 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
625 wake_up(&mad_snoop_priv->wait);
626 spin_lock_irqsave(&qp_info->snoop_lock, flags);
627 }
628 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
629}
630
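/*
 * Build a synthetic receive work completion for a directed route SMP that
 * is processed locally rather than being posted to the wire.
 */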
631static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
632 struct ib_wc *wc)
633{
634 memset(wc, 0, sizeof *wc);
635 wc->wr_id = wr_id;
636 wc->status = IB_WC_SUCCESS;
637 wc->opcode = IB_WC_RECV;
638 wc->pkey_index = pkey_index;
639 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
640 wc->src_qp = IB_QP0;
641 wc->qp_num = IB_QP0;
642 wc->slid = slid;
643 wc->sl = 0;
644 wc->dlid_path_bits = 0;
645 wc->port_num = port_num;
646}
647
648/*
649 * Return 0 if SMP is to be sent
650 * Return 1 if SMP was consumed locally (whether or not solicited)
651 * Return < 0 if error
652 */
653static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
654 struct ib_smp *smp,
655 struct ib_send_wr *send_wr)
656{
657 int ret;
658 unsigned long flags;
659 struct ib_mad_local_private *local;
660 struct ib_mad_private *mad_priv;
661 struct ib_mad_port_private *port_priv;
662 struct ib_mad_agent_private *recv_mad_agent = NULL;
663 struct ib_device *device = mad_agent_priv->agent.device;
664 u8 port_num = mad_agent_priv->agent.port_num;
665 struct ib_wc mad_wc;
666
667 if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
668 ret = -EINVAL;
669 printk(KERN_ERR PFX "Invalid directed route\n");
670 goto out;
671 }
672 /* Check to post send on QP or process locally */
673 ret = smi_check_local_dr_smp(smp, device, port_num);
674 if (!ret || !device->process_mad)
675 goto out;
676
677 local = kmalloc(sizeof *local, GFP_ATOMIC);
678 if (!local) {
679 ret = -ENOMEM;
680 printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
681 goto out;
682 }
683 local->mad_priv = NULL;
684 local->recv_mad_agent = NULL;
685 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
686 if (!mad_priv) {
687 ret = -ENOMEM;
688 printk(KERN_ERR PFX "No memory for local response MAD\n");
689 kfree(local);
690 goto out;
691 }
692
693 build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
694 send_wr->wr.ud.port_num, &mad_wc);
695
696 /* No GRH for DR SMP */
697 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
698 (struct ib_mad *)smp,
699 (struct ib_mad *)&mad_priv->mad);
700 switch (ret)
701 {
702 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
703 if (response_mad(&mad_priv->mad.mad) &&
704 mad_agent_priv->agent.recv_handler) {
705 local->mad_priv = mad_priv;
706 local->recv_mad_agent = mad_agent_priv;
707 /*
708 * Reference MAD agent until receive
709 * side of local completion handled
710 */
711 atomic_inc(&mad_agent_priv->refcount);
712 } else
713 kmem_cache_free(ib_mad_cache, mad_priv);
714 break;
715 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
716 kmem_cache_free(ib_mad_cache, mad_priv);
717 break;
718 case IB_MAD_RESULT_SUCCESS:
719 /* Treat like an incoming receive MAD */
720 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
721 mad_agent_priv->agent.port_num);
722 if (port_priv) {
723 mad_priv->mad.mad.mad_hdr.tid =
724 ((struct ib_mad *)smp)->mad_hdr.tid;
725 recv_mad_agent = find_mad_agent(port_priv,
726 &mad_priv->mad.mad);
727 }
728 if (!port_priv || !recv_mad_agent) {
729 kmem_cache_free(ib_mad_cache, mad_priv);
730 kfree(local);
731 ret = 0;
732 goto out;
733 }
734 local->mad_priv = mad_priv;
735 local->recv_mad_agent = recv_mad_agent;
736 break;
737 default:
738 kmem_cache_free(ib_mad_cache, mad_priv);
739 kfree(local);
740 ret = -EINVAL;
741 goto out;
742 }
743
744 local->send_wr = *send_wr;
745 local->send_wr.sg_list = local->sg_list;
746 memcpy(local->sg_list, send_wr->sg_list,
747 sizeof *send_wr->sg_list * send_wr->num_sge);
748 local->send_wr.next = NULL;
749 local->tid = send_wr->wr.ud.mad_hdr->tid;
750 local->wr_id = send_wr->wr_id;
751 /* Reference MAD agent until send side of local completion handled */
752 atomic_inc(&mad_agent_priv->refcount);
753 /* Queue local completion to local list */
754 spin_lock_irqsave(&mad_agent_priv->lock, flags);
755 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
756 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
757 queue_work(mad_agent_priv->qp_info->port_priv->wq,
758 &mad_agent_priv->local_work);
759 ret = 1;
760out:
761 return ret;
762}
763
764static int get_buf_length(int hdr_len, int data_len)
765{
766 int seg_size, pad;
767
768 seg_size = sizeof(struct ib_mad) - hdr_len;
769 if (data_len && seg_size) {
770 pad = seg_size - data_len % seg_size;
771 if (pad == seg_size)
772 pad = 0;
773 } else
774 pad = seg_size;
775 return hdr_len + data_len + pad;
776}
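/*
 * Worked example (illustrative): with a 256 byte MAD, hdr_len = 24 and
 * data_len = 200 give seg_size = 232 and pad = 232 - (200 % 232) = 32,
 * so the returned buffer length is 24 + 200 + 32 = 256.
 */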
777
778struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
779 u32 remote_qpn, u16 pkey_index,
780 struct ib_ah *ah, int rmpp_active,
781 int hdr_len, int data_len,
782 unsigned int __nocast gfp_mask)
783{
784 struct ib_mad_agent_private *mad_agent_priv;
785 struct ib_mad_send_buf *send_buf;
786 int buf_size;
787 void *buf;
788
789 if (rmpp_active)
790 return ERR_PTR(-EINVAL); /* until RMPP implemented */
791 mad_agent_priv = container_of(mad_agent,
792 struct ib_mad_agent_private, agent);
793 buf_size = get_buf_length(hdr_len, data_len);
794
795 buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask);
796 if (!buf)
797 return ERR_PTR(-ENOMEM);
798 memset(buf, 0, sizeof *send_buf + buf_size);
799
800 send_buf = buf + buf_size;
801 send_buf->mad = buf;
802
803 send_buf->sge.addr = dma_map_single(mad_agent->device->dma_device,
804 buf, buf_size, DMA_TO_DEVICE);
805 pci_unmap_addr_set(send_buf, mapping, send_buf->sge.addr);
806 send_buf->sge.length = buf_size;
807 send_buf->sge.lkey = mad_agent->mr->lkey;
808
809 send_buf->send_wr.wr_id = (unsigned long) send_buf;
810 send_buf->send_wr.sg_list = &send_buf->sge;
811 send_buf->send_wr.num_sge = 1;
812 send_buf->send_wr.opcode = IB_WR_SEND;
813 send_buf->send_wr.send_flags = IB_SEND_SIGNALED;
814 send_buf->send_wr.wr.ud.ah = ah;
815 send_buf->send_wr.wr.ud.mad_hdr = &send_buf->mad->mad_hdr;
816 send_buf->send_wr.wr.ud.remote_qpn = remote_qpn;
817 send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
818 send_buf->send_wr.wr.ud.pkey_index = pkey_index;
819 send_buf->mad_agent = mad_agent;
820 atomic_inc(&mad_agent_priv->refcount);
821 return send_buf;
822}
823EXPORT_SYMBOL(ib_create_send_mad);
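/*
 * Illustrative sketch (not part of the original source): allocating a send
 * buffer for an already registered agent.  "agent", "ah", "remote_qpn",
 * "pkey_index", "hdr_len" and "data_len" are assumed to be set up by the
 * caller; rmpp_active must be 0 until RMPP is implemented.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, ah, 0,
 *				 hdr_len, data_len, GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 */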
824
825void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
826{
827 struct ib_mad_agent_private *mad_agent_priv;
828
829 mad_agent_priv = container_of(send_buf->mad_agent,
830 struct ib_mad_agent_private, agent);
831
832 dma_unmap_single(send_buf->mad_agent->device->dma_device,
833 pci_unmap_addr(send_buf, mapping),
834 send_buf->sge.length, DMA_TO_DEVICE);
835 kfree(send_buf->mad);
836
837 if (atomic_dec_and_test(&mad_agent_priv->refcount))
838 wake_up(&mad_agent_priv->wait);
839}
840EXPORT_SYMBOL(ib_free_send_mad);
841
842static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
843{
844 struct ib_mad_qp_info *qp_info;
845 struct ib_send_wr *bad_send_wr;
846 struct list_head *list;
847 unsigned long flags;
848 int ret;
849
850 /* Set WR ID to find mad_send_wr upon completion */
851 qp_info = mad_send_wr->mad_agent_priv->qp_info;
852 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
853 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
854
855 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
856 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
857 ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp,
858 &mad_send_wr->send_wr, &bad_send_wr);
859 list = &qp_info->send_queue.list;
860 } else {
861 ret = 0;
862 list = &qp_info->overflow_list;
863 }
864
865 if (!ret) {
866 qp_info->send_queue.count++;
867 list_add_tail(&mad_send_wr->mad_list.list, list);
868 }
869 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
870 return ret;
871}
872
873/*
874 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
875 * with the registered client
876 */
877int ib_post_send_mad(struct ib_mad_agent *mad_agent,
878 struct ib_send_wr *send_wr,
879 struct ib_send_wr **bad_send_wr)
880{
881 int ret = -EINVAL;
882 struct ib_mad_agent_private *mad_agent_priv;
883
884 /* Validate supplied parameters */
885 if (!bad_send_wr)
886 goto error1;
887
888 if (!mad_agent || !send_wr)
889 goto error2;
890
891 if (!mad_agent->send_handler)
892 goto error2;
893
894 mad_agent_priv = container_of(mad_agent,
895 struct ib_mad_agent_private,
896 agent);
897
898 /* Walk list of send WRs and post each on send list */
899 while (send_wr) {
900 unsigned long flags;
901 struct ib_send_wr *next_send_wr;
902 struct ib_mad_send_wr_private *mad_send_wr;
903 struct ib_smp *smp;
904
905 /* Validate more parameters */
906 if (send_wr->num_sge > IB_MAD_SEND_REQ_MAX_SG)
907 goto error2;
908
909 if (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler)
910 goto error2;
911
912 if (!send_wr->wr.ud.mad_hdr) {
913 printk(KERN_ERR PFX "MAD header must be supplied "
914 "in WR %p\n", send_wr);
915 goto error2;
916 }
917
918 /*
919 * Save pointer to next work request to post in case the
920 * current one completes, and the user modifies the work
921 * request associated with the completion
922 */
923 next_send_wr = (struct ib_send_wr *)send_wr->next;
924
925 smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr;
926 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
927 ret = handle_outgoing_dr_smp(mad_agent_priv, smp,
928 send_wr);
929 if (ret < 0) /* error */
930 goto error2;
931 else if (ret == 1) /* locally consumed */
932 goto next;
933 }
934
935 /* Allocate MAD send WR tracking structure */
936 mad_send_wr = kmalloc(sizeof *mad_send_wr, GFP_ATOMIC);
937 if (!mad_send_wr) {
938 printk(KERN_ERR PFX "No memory for "
939 "ib_mad_send_wr_private\n");
940 ret = -ENOMEM;
941 goto error2;
942 }
943
944 mad_send_wr->send_wr = *send_wr;
945 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
946 memcpy(mad_send_wr->sg_list, send_wr->sg_list,
947 sizeof *send_wr->sg_list * send_wr->num_sge);
948 mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id;
949 mad_send_wr->send_wr.next = NULL;
950 mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
951 mad_send_wr->mad_agent_priv = mad_agent_priv;
952 /* Timeout will be updated after send completes */
953 mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr.
954 ud.timeout_ms);
955 mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
956 /* One reference for each work request to QP + response */
957 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
958 mad_send_wr->status = IB_WC_SUCCESS;
959
960 /* Reference MAD agent until send completes */
961 atomic_inc(&mad_agent_priv->refcount);
962 spin_lock_irqsave(&mad_agent_priv->lock, flags);
963 list_add_tail(&mad_send_wr->agent_list,
964 &mad_agent_priv->send_list);
965 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
966
967 ret = ib_send_mad(mad_send_wr);
968 if (ret) {
969 /* Fail send request */
970 spin_lock_irqsave(&mad_agent_priv->lock, flags);
971 list_del(&mad_send_wr->agent_list);
972 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
973 atomic_dec(&mad_agent_priv->refcount);
974 goto error2;
975 }
976next:
977 send_wr = next_send_wr;
978 }
979 return 0;
980
981error2:
982 *bad_send_wr = send_wr;
983error1:
984 return ret;
985}
986EXPORT_SYMBOL(ib_post_send_mad);
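/*
 * Illustrative sketch (not part of the original source): posting the work
 * request of a send buffer created with ib_create_send_mad(); "agent" and
 * "send_buf" are assumed to be fully set up by a hypothetical caller,
 * including send_buf->send_wr.wr.ud.ah.
 *
 *	struct ib_send_wr *bad_send_wr;
 *	int ret;
 *
 *	ret = ib_post_send_mad(agent, &send_buf->send_wr, &bad_send_wr);
 *	if (ret)
 *		ib_free_send_mad(send_buf);
 */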
987
988/*
989 * ib_free_recv_mad - Returns data buffers used to receive
990 * a MAD to the access layer
991 */
992void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
993{
994 struct ib_mad_recv_buf *entry;
995 struct ib_mad_private_header *mad_priv_hdr;
996 struct ib_mad_private *priv;
997
998 mad_priv_hdr = container_of(mad_recv_wc,
999 struct ib_mad_private_header,
1000 recv_wc);
1001 priv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1002
1003 /*
1004 * Walk receive buffer list associated with this WC
1005 * No need to remove them from list of receive buffers
1006 */
1007 list_for_each_entry(entry, &mad_recv_wc->recv_buf.list, list) {
1008 /* Free previous receive buffer */
1009 kmem_cache_free(ib_mad_cache, priv);
1010 mad_priv_hdr = container_of(mad_recv_wc,
1011 struct ib_mad_private_header,
1012 recv_wc);
1013 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1014 header);
1015 }
1016
1017 /* Free last buffer */
1018 kmem_cache_free(ib_mad_cache, priv);
1019}
1020EXPORT_SYMBOL(ib_free_recv_mad);
1021
1022struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1023 u8 rmpp_version,
1024 ib_mad_send_handler send_handler,
1025 ib_mad_recv_handler recv_handler,
1026 void *context)
1027{
1028 return ERR_PTR(-EINVAL); /* XXX: for now */
1029}
1030EXPORT_SYMBOL(ib_redirect_mad_qp);
1031
1032int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1033 struct ib_wc *wc)
1034{
1035 printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1036 return 0;
1037}
1038EXPORT_SYMBOL(ib_process_mad_wc);
1039
1040static int method_in_use(struct ib_mad_mgmt_method_table **method,
1041 struct ib_mad_reg_req *mad_reg_req)
1042{
1043 int i;
1044
1045 for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
1046 i < IB_MGMT_MAX_METHODS;
1047 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1048 1+i)) {
1049 if ((*method)->agent[i]) {
1050 printk(KERN_ERR PFX "Method %d already in use\n", i);
1051 return -EINVAL;
1052 }
1053 }
1054 return 0;
1055}
1056
1057static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1058{
1059 /* Allocate management method table */
1060 *method = kmalloc(sizeof **method, GFP_ATOMIC);
1061 if (!*method) {
1062 printk(KERN_ERR PFX "No memory for "
1063 "ib_mad_mgmt_method_table\n");
1064 return -ENOMEM;
1065 }
1066 /* Clear management method table */
1067 memset(*method, 0, sizeof **method);
1068
1069 return 0;
1070}
1071
1072/*
1073 * Check to see if there are any methods still in use
1074 */
1075static int check_method_table(struct ib_mad_mgmt_method_table *method)
1076{
1077 int i;
1078
1079 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1080 if (method->agent[i])
1081 return 1;
1082 return 0;
1083}
1084
1085/*
1086 * Check to see if there are any method tables for this class still in use
1087 */
1088static int check_class_table(struct ib_mad_mgmt_class_table *class)
1089{
1090 int i;
1091
1092 for (i = 0; i < MAX_MGMT_CLASS; i++)
1093 if (class->method_table[i])
1094 return 1;
1095 return 0;
1096}
1097
1098static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1099{
1100 int i;
1101
1102 for (i = 0; i < MAX_MGMT_OUI; i++)
1103 if (vendor_class->method_table[i])
1104 return 1;
1105 return 0;
1106}
1107
1108static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1109 char *oui)
1110{
1111 int i;
1112
1113 for (i = 0; i < MAX_MGMT_OUI; i++)
1114 /* Is there a matching OUI for this vendor class? */
1115 if (!memcmp(vendor_class->oui[i], oui, 3))
1116 return i;
1117
1118 return -1;
1119}
1120
1121static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1122{
1123 int i;
1124
1125 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1126 if (vendor->vendor_class[i])
1127 return 1;
1128
1129 return 0;
1130}
1131
1132static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1133 struct ib_mad_agent_private *agent)
1134{
1135 int i;
1136
1137 /* Remove any methods for this mad agent */
1138 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1139 if (method->agent[i] == agent) {
1140 method->agent[i] = NULL;
1141 }
1142 }
1143}
1144
1145static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1146 struct ib_mad_agent_private *agent_priv,
1147 u8 mgmt_class)
1148{
1149 struct ib_mad_port_private *port_priv;
1150 struct ib_mad_mgmt_class_table **class;
1151 struct ib_mad_mgmt_method_table **method;
1152 int i, ret;
1153
1154 port_priv = agent_priv->qp_info->port_priv;
1155 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1156 if (!*class) {
1157 /* Allocate management class table for "new" class version */
1158 *class = kmalloc(sizeof **class, GFP_ATOMIC);
1159 if (!*class) {
1160 printk(KERN_ERR PFX "No memory for "
1161 "ib_mad_mgmt_class_table\n");
1162 ret = -ENOMEM;
1163 goto error1;
1164 }
1165 /* Clear management class table */
1166 memset(*class, 0, sizeof(**class));
1167 /* Allocate method table for this management class */
1168 method = &(*class)->method_table[mgmt_class];
1169 if ((ret = allocate_method_table(method)))
1170 goto error2;
1171 } else {
1172 method = &(*class)->method_table[mgmt_class];
1173 if (!*method) {
1174 /* Allocate method table for this management class */
1175 if ((ret = allocate_method_table(method)))
1176 goto error1;
1177 }
1178 }
1179
1180 /* Now, make sure methods are not already in use */
1181 if (method_in_use(method, mad_reg_req))
1182 goto error3;
1183
1184 /* Finally, add in methods being registered */
1185 for (i = find_first_bit(mad_reg_req->method_mask,
1186 IB_MGMT_MAX_METHODS);
1187 i < IB_MGMT_MAX_METHODS;
1188 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1189 1+i)) {
1190 (*method)->agent[i] = agent_priv;
1191 }
1192 return 0;
1193
1194error3:
1195 /* Remove any methods for this mad agent */
1196 remove_methods_mad_agent(*method, agent_priv);
1197 /* Now, check to see if there are any methods in use */
1198 if (!check_method_table(*method)) {
1199 /* If not, release management method table */
1200 kfree(*method);
1201 *method = NULL;
1202 }
1203 ret = -EINVAL;
1204 goto error1;
1205error2:
1206 kfree(*class);
1207 *class = NULL;
1208error1:
1209 return ret;
1210}
1211
1212static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1213 struct ib_mad_agent_private *agent_priv)
1214{
1215 struct ib_mad_port_private *port_priv;
1216 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1217 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1218 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1219 struct ib_mad_mgmt_method_table **method;
1220 int i, ret = -ENOMEM;
1221 u8 vclass;
1222
1223 /* "New" vendor (with OUI) class */
1224 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1225 port_priv = agent_priv->qp_info->port_priv;
1226 vendor_table = &port_priv->version[
1227 mad_reg_req->mgmt_class_version].vendor;
1228 if (!*vendor_table) {
1229 /* Allocate mgmt vendor class table for "new" class version */
1230 vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
1231 if (!vendor) {
1232 printk(KERN_ERR PFX "No memory for "
1233 "ib_mad_mgmt_vendor_class_table\n");
1234 goto error1;
1235 }
1236 /* Clear management vendor class table */
1237 memset(vendor, 0, sizeof(*vendor));
1238 *vendor_table = vendor;
1239 }
1240 if (!(*vendor_table)->vendor_class[vclass]) {
1241 /* Allocate table for this management vendor class */
1242 vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
1243 if (!vendor_class) {
1244 printk(KERN_ERR PFX "No memory for "
1245 "ib_mad_mgmt_vendor_class\n");
1246 goto error2;
1247 }
1248 memset(vendor_class, 0, sizeof(*vendor_class));
1249 (*vendor_table)->vendor_class[vclass] = vendor_class;
1250 }
1251 for (i = 0; i < MAX_MGMT_OUI; i++) {
1252 /* Is there a matching OUI for this vendor class? */
1253 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1254 mad_reg_req->oui, 3)) {
1255 method = &(*vendor_table)->vendor_class[
1256 vclass]->method_table[i];
1257 BUG_ON(!*method);
1258 goto check_in_use;
1259 }
1260 }
1261 for (i = 0; i < MAX_MGMT_OUI; i++) {
1262 /* OUI slot available ? */
1263 if (!is_vendor_oui((*vendor_table)->vendor_class[
1264 vclass]->oui[i])) {
1265 method = &(*vendor_table)->vendor_class[
1266 vclass]->method_table[i];
1267 BUG_ON(*method);
1268 /* Allocate method table for this OUI */
1269 if ((ret = allocate_method_table(method)))
1270 goto error3;
1271 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1272 mad_reg_req->oui, 3);
1273 goto check_in_use;
1274 }
1275 }
1276 printk(KERN_ERR PFX "All OUI slots in use\n");
1277 goto error3;
1278
1279check_in_use:
1280 /* Now, make sure methods are not already in use */
1281 if (method_in_use(method, mad_reg_req))
1282 goto error4;
1283
1284 /* Finally, add in methods being registered */
1285 for (i = find_first_bit(mad_reg_req->method_mask,
1286 IB_MGMT_MAX_METHODS);
1287 i < IB_MGMT_MAX_METHODS;
1288 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1289 1+i)) {
1290 (*method)->agent[i] = agent_priv;
1291 }
1292 return 0;
1293
1294error4:
1295 /* Remove any methods for this mad agent */
1296 remove_methods_mad_agent(*method, agent_priv);
1297 /* Now, check to see if there are any methods in use */
1298 if (!check_method_table(*method)) {
1299 /* If not, release management method table */
1300 kfree(*method);
1301 *method = NULL;
1302 }
1303 ret = -EINVAL;
1304error3:
1305 if (vendor_class) {
1306 (*vendor_table)->vendor_class[vclass] = NULL;
1307 kfree(vendor_class);
1308 }
1309error2:
1310 if (vendor) {
1311 *vendor_table = NULL;
1312 kfree(vendor);
1313 }
1314error1:
1315 return ret;
1316}
1317
1318static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1319{
1320 struct ib_mad_port_private *port_priv;
1321 struct ib_mad_mgmt_class_table *class;
1322 struct ib_mad_mgmt_method_table *method;
1323 struct ib_mad_mgmt_vendor_class_table *vendor;
1324 struct ib_mad_mgmt_vendor_class *vendor_class;
1325 int index;
1326 u8 mgmt_class;
1327
1328 /*
1329 * Was MAD registration request supplied
1330 * with original registration ?
1331 */
1332 if (!agent_priv->reg_req) {
1333 goto out;
1334 }
1335
1336 port_priv = agent_priv->qp_info->port_priv;
1337 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1338 class = port_priv->version[
1339 agent_priv->reg_req->mgmt_class_version].class;
1340 if (!class)
1341 goto vendor_check;
1342
1343 method = class->method_table[mgmt_class];
1344 if (method) {
1345 /* Remove any methods for this mad agent */
1346 remove_methods_mad_agent(method, agent_priv);
1347 /* Now, check to see if there are any methods still in use */
1348 if (!check_method_table(method)) {
1349 /* If not, release management method table */
1350 kfree(method);
1351 class->method_table[mgmt_class] = NULL;
1352 /* Any management classes left ? */
1353 if (!check_class_table(class)) {
1354 /* If not, release management class table */
1355 kfree(class);
1356 port_priv->version[
1357 agent_priv->reg_req->
1358 mgmt_class_version].class = NULL;
1359 }
1360 }
1361 }
1362
1363vendor_check:
1364 if (!is_vendor_class(mgmt_class))
1365 goto out;
1366
1367 /* normalize mgmt_class to vendor range 2 */
1368 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1369 vendor = port_priv->version[
1370 agent_priv->reg_req->mgmt_class_version].vendor;
1371
1372 if (!vendor)
1373 goto out;
1374
1375 vendor_class = vendor->vendor_class[mgmt_class];
1376 if (vendor_class) {
1377 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1378 if (index < 0)
1379 goto out;
1380 method = vendor_class->method_table[index];
1381 if (method) {
1382 /* Remove any methods for this mad agent */
1383 remove_methods_mad_agent(method, agent_priv);
1384 /*
1385 * Now, check to see if there are
1386 * any methods still in use
1387 */
1388 if (!check_method_table(method)) {
1389 /* If not, release management method table */
1390 kfree(method);
1391 vendor_class->method_table[index] = NULL;
1392 memset(vendor_class->oui[index], 0, 3);
1393 /* Any OUIs left ? */
1394 if (!check_vendor_class(vendor_class)) {
1395 /* If not, release vendor class table */
1396 kfree(vendor_class);
1397 vendor->vendor_class[mgmt_class] = NULL;
1398 /* Any other vendor classes left ? */
1399 if (!check_vendor_table(vendor)) {
1400 kfree(vendor);
1401 port_priv->version[
1402 agent_priv->reg_req->
1403 mgmt_class_version].
1404 vendor = NULL;
1405 }
1406 }
1407 }
1408 }
1409 }
1410
1411out:
1412 return;
1413}
1414
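/*
 * Find the agent registered to receive a given MAD: responses are routed by
 * the high 32 bits of the transaction ID, requests by class version,
 * management class and method (plus OUI for vendor range 2 classes).
 * Takes a reference on the returned agent.
 */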
1415static struct ib_mad_agent_private *
1416find_mad_agent(struct ib_mad_port_private *port_priv,
1417 struct ib_mad *mad)
1418{
1419 struct ib_mad_agent_private *mad_agent = NULL;
1420 unsigned long flags;
1421
1422 spin_lock_irqsave(&port_priv->reg_lock, flags);
1423 if (response_mad(mad)) {
1424 u32 hi_tid;
1425 struct ib_mad_agent_private *entry;
1426
1427 /*
1428 * Routing is based on high 32 bits of transaction ID
1429 * of MAD.
1430 */
1431 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1432 list_for_each_entry(entry, &port_priv->agent_list,
1433 agent_list) {
1434 if (entry->agent.hi_tid == hi_tid) {
1435 mad_agent = entry;
1436 break;
1437 }
1438 }
1439 } else {
1440 struct ib_mad_mgmt_class_table *class;
1441 struct ib_mad_mgmt_method_table *method;
1442 struct ib_mad_mgmt_vendor_class_table *vendor;
1443 struct ib_mad_mgmt_vendor_class *vendor_class;
1444 struct ib_vendor_mad *vendor_mad;
1445 int index;
1446
1447 /*
1448 * Routing is based on version, class, and method
1449 * For "newer" vendor MADs, also based on OUI
1450 */
1451 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1452 goto out;
1453 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1454 class = port_priv->version[
1455 mad->mad_hdr.class_version].class;
1456 if (!class)
1457 goto out;
1458 method = class->method_table[convert_mgmt_class(
1459 mad->mad_hdr.mgmt_class)];
1460 if (method)
1461 mad_agent = method->agent[mad->mad_hdr.method &
1462 ~IB_MGMT_METHOD_RESP];
1463 } else {
1464 vendor = port_priv->version[
1465 mad->mad_hdr.class_version].vendor;
1466 if (!vendor)
1467 goto out;
1468 vendor_class = vendor->vendor_class[vendor_class_index(
1469 mad->mad_hdr.mgmt_class)];
1470 if (!vendor_class)
1471 goto out;
1472 /* Find matching OUI */
1473 vendor_mad = (struct ib_vendor_mad *)mad;
1474 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1475 if (index == -1)
1476 goto out;
1477 method = vendor_class->method_table[index];
1478 if (method) {
1479 mad_agent = method->agent[mad->mad_hdr.method &
1480 ~IB_MGMT_METHOD_RESP];
1481 }
1482 }
1483 }
1484
1485 if (mad_agent) {
1486 if (mad_agent->agent.recv_handler)
1487 atomic_inc(&mad_agent->refcount);
1488 else {
1489 printk(KERN_NOTICE PFX "No receive handler for client "
1490 "%p on port %d\n",
1491 &mad_agent->agent, port_priv->port_num);
1492 mad_agent = NULL;
1493 }
1494 }
1495out:
1496 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1497
1498 return mad_agent;
1499}
1500
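/*
 * Basic sanity checks on an incoming MAD: the base version must be
 * supported, SMI classes are only valid on QP0 and all other classes
 * only on QP1.
 */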
1501static int validate_mad(struct ib_mad *mad, u32 qp_num)
1502{
1503 int valid = 0;
1504
1505 /* Make sure MAD base version is understood */
1506 if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1507 printk(KERN_ERR PFX "MAD received with unsupported base "
1508 "version %d\n", mad->mad_hdr.base_version);
1509 goto out;
1510 }
1511
1512 /* Filter SMI packets sent to other than QP0 */
1513 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1514 (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1515 if (qp_num == 0)
1516 valid = 1;
1517 } else {
1518 /* Filter GSI packets sent to QP0 */
1519 if (qp_num != 0)
1520 valid = 1;
1521 }
1522
1523out:
1524 return valid;
1525}
1526
1527static struct ib_mad_send_wr_private*
1528find_send_req(struct ib_mad_agent_private *mad_agent_priv,
1529 u64 tid)
1530{
1531 struct ib_mad_send_wr_private *mad_send_wr;
1532
1533 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
1534 agent_list) {
1535 if (mad_send_wr->tid == tid)
1536 return mad_send_wr;
1537 }
1538
1539 /*
1540 * It's possible to receive the response before we've
1541 * been notified that the send has completed
1542 */
1543 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
1544 agent_list) {
1545 if (mad_send_wr->tid == tid && mad_send_wr->timeout) {
1546 /* Verify request has not been canceled */
1547 return (mad_send_wr->status == IB_WC_SUCCESS) ?
1548 mad_send_wr : NULL;
1549 }
1550 }
1551 return NULL;
1552}
1553
1554static void ib_mark_req_done(struct ib_mad_send_wr_private *mad_send_wr)
1555{
1556 mad_send_wr->timeout = 0;
1557 if (mad_send_wr->refcount == 1) {
1558 list_del(&mad_send_wr->agent_list);
1559 list_add_tail(&mad_send_wr->agent_list,
1560 &mad_send_wr->mad_agent_priv->done_list);
1561 }
1562}
1563
1564static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1565 struct ib_mad_recv_wc *mad_recv_wc)
1566{
1567 struct ib_mad_send_wr_private *mad_send_wr;
1568 struct ib_mad_send_wc mad_send_wc;
1569 unsigned long flags;
1570 u64 tid;
1571
1572 INIT_LIST_HEAD(&mad_recv_wc->recv_buf.list);
1573 /* Complete corresponding request */
1574 if (response_mad(mad_recv_wc->recv_buf.mad)) {
1575 tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
1576 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1577 mad_send_wr = find_send_req(mad_agent_priv, tid);
1578 if (!mad_send_wr) {
1579 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1580 ib_free_recv_mad(mad_recv_wc);
1581 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1582 wake_up(&mad_agent_priv->wait);
1583 return;
1584 }
1585 ib_mark_req_done(mad_send_wr);
1586 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1587
1588 /* Defined behavior is to complete response before request */
1589 mad_recv_wc->wc->wr_id = mad_send_wr->wr_id;
1590 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1591 mad_recv_wc);
1592 atomic_dec(&mad_agent_priv->refcount);
1593
1594 mad_send_wc.status = IB_WC_SUCCESS;
1595 mad_send_wc.vendor_err = 0;
1596 mad_send_wc.wr_id = mad_send_wr->wr_id;
1597 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1598 } else {
1599 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1600 mad_recv_wc);
1601 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1602 wake_up(&mad_agent_priv->wait);
1603 }
1604}
1605
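/*
 * Handle a receive completion: unmap the buffer, build the MAD receive work
 * completion, give the driver's process_mad() first refusal, then hand the
 * MAD to the matching agent and repost a receive buffer for the QP.
 */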
1606static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1607 struct ib_wc *wc)
1608{
1609 struct ib_mad_qp_info *qp_info;
1610 struct ib_mad_private_header *mad_priv_hdr;
1611 struct ib_mad_private *recv, *response;
1612 struct ib_mad_list_head *mad_list;
1613 struct ib_mad_agent_private *mad_agent;
1614
1615 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1616 if (!response)
1617 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1618 "for response buffer\n");
1619
1620 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1621 qp_info = mad_list->mad_queue->qp_info;
1622 dequeue_mad(mad_list);
1623
1624 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1625 mad_list);
1626 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1627 dma_unmap_single(port_priv->device->dma_device,
1628 pci_unmap_addr(&recv->header, mapping),
1629 sizeof(struct ib_mad_private) -
1630 sizeof(struct ib_mad_private_header),
1631 DMA_FROM_DEVICE);
1632
1633 /* Setup MAD receive work completion from "normal" work completion */
1634 recv->header.wc = *wc;
1635 recv->header.recv_wc.wc = &recv->header.wc;
1636 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1637 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1638 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1639
1640 if (atomic_read(&qp_info->snoop_count))
1641 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1642
1643 /* Validate MAD */
1644 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1645 goto out;
1646
1647 if (recv->mad.mad.mad_hdr.mgmt_class ==
1648 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1649 if (!smi_handle_dr_smp_recv(&recv->mad.smp,
1650 port_priv->device->node_type,
1651 port_priv->port_num,
1652 port_priv->device->phys_port_cnt))
1653 goto out;
1654 if (!smi_check_forward_dr_smp(&recv->mad.smp))
1655 goto local;
1656 if (!smi_handle_dr_smp_send(&recv->mad.smp,
1657 port_priv->device->node_type,
1658 port_priv->port_num))
1659 goto out;
1660 if (!smi_check_local_dr_smp(&recv->mad.smp,
1661 port_priv->device,
1662 port_priv->port_num))
1663 goto out;
1664 }
1665
1666local:
1667 /* Give driver "right of first refusal" on incoming MAD */
1668 if (port_priv->device->process_mad) {
1669 int ret;
1670
1671 if (!response) {
1672 printk(KERN_ERR PFX "No memory for response MAD\n");
1673 /*
1674 * Is it better to assume that
1675 * it wouldn't be processed ?
1676 */
1677 goto out;
1678 }
1679
1680 ret = port_priv->device->process_mad(port_priv->device, 0,
1681 port_priv->port_num,
1682 wc, &recv->grh,
1683 &recv->mad.mad,
1684 &response->mad.mad);
1685 if (ret & IB_MAD_RESULT_SUCCESS) {
1686 if (ret & IB_MAD_RESULT_CONSUMED)
1687 goto out;
1688 if (ret & IB_MAD_RESULT_REPLY) {
1689 /* Send response */
1690 if (!agent_send(response, &recv->grh, wc,
1691 port_priv->device,
1692 port_priv->port_num))
1693 response = NULL;
1694 goto out;
1695 }
1696 }
1697 }
1698
1699 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1700 if (mad_agent) {
1701 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1702 /*
1703 * recv is freed up in error cases in ib_mad_complete_recv
1704 * or via recv_handler in ib_mad_complete_recv()
1705 */
1706 recv = NULL;
1707 }
1708
1709out:
1710 /* Post another receive request for this QP */
1711 if (response) {
1712 ib_mad_post_receive_mads(qp_info, response);
1713 if (recv)
1714 kmem_cache_free(ib_mad_cache, recv);
1715 } else
1716 ib_mad_post_receive_mads(qp_info, recv);
1717}
1718
1719static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1720{
1721 struct ib_mad_send_wr_private *mad_send_wr;
1722 unsigned long delay;
1723
1724 if (list_empty(&mad_agent_priv->wait_list)) {
1725 cancel_delayed_work(&mad_agent_priv->timed_work);
1726 } else {
1727 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1728 struct ib_mad_send_wr_private,
1729 agent_list);
1730
1731 if (time_after(mad_agent_priv->timeout,
1732 mad_send_wr->timeout)) {
1733 mad_agent_priv->timeout = mad_send_wr->timeout;
1734 cancel_delayed_work(&mad_agent_priv->timed_work);
1735 delay = mad_send_wr->timeout - jiffies;
1736 if ((long)delay <= 0)
1737 delay = 1;
1738 queue_delayed_work(mad_agent_priv->qp_info->
1739 port_priv->wq,
1740 &mad_agent_priv->timed_work, delay);
1741 }
1742 }
1743}
1744
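/*
 * Move a send WR onto the agent's wait list, which is kept ordered by
 * absolute timeout, and reschedule the timeout work if this entry now
 * expires first.
 */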
1745static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1746{
1747 struct ib_mad_agent_private *mad_agent_priv;
1748 struct ib_mad_send_wr_private *temp_mad_send_wr;
1749 struct list_head *list_item;
1750 unsigned long delay;
1751
1752 mad_agent_priv = mad_send_wr->mad_agent_priv;
1753 list_del(&mad_send_wr->agent_list);
1754
1755 delay = mad_send_wr->timeout;
1756 mad_send_wr->timeout += jiffies;
1757
1758 if (delay) {
1759 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
1760 temp_mad_send_wr = list_entry(list_item,
1761 struct ib_mad_send_wr_private,
1762 agent_list);
1763 if (time_after(mad_send_wr->timeout,
1764 temp_mad_send_wr->timeout))
1765 break;
1766 }
1767 }
1768 else
1769 list_item = &mad_agent_priv->wait_list;
1770 list_add(&mad_send_wr->agent_list, list_item);
1771
1772 /* Reschedule a work item if we have a shorter timeout */
1773 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
1774 cancel_delayed_work(&mad_agent_priv->timed_work);
1775 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
1776 &mad_agent_priv->timed_work, delay);
1777 }
1778}
1779
1780void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
1781 int timeout_ms)
1782{
1783 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
1784 wait_for_response(mad_send_wr);
1785}
1786
1787/*
1788 * Process a send work completion
1789 */
1790static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
1791 struct ib_mad_send_wc *mad_send_wc)
1792{
1793 struct ib_mad_agent_private *mad_agent_priv;
1794 unsigned long flags;
1795
1796 mad_agent_priv = mad_send_wr->mad_agent_priv;
1797 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1798 if (mad_send_wc->status != IB_WC_SUCCESS &&
1799 mad_send_wr->status == IB_WC_SUCCESS) {
1800 mad_send_wr->status = mad_send_wc->status;
1801 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
1802 }
1803
1804 if (--mad_send_wr->refcount > 0) {
1805 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
1806 mad_send_wr->status == IB_WC_SUCCESS) {
1807 wait_for_response(mad_send_wr);
1808 }
1809 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1810 return;
1811 }
1812
1813 /* Remove send from MAD agent and notify client of completion */
1814 list_del(&mad_send_wr->agent_list);
1815 adjust_timeout(mad_agent_priv);
1816 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1817
1818	if (mad_send_wr->status != IB_WC_SUCCESS)
1819 mad_send_wc->status = mad_send_wr->status;
1820 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
1821 mad_send_wc);
1822
1823 /* Release reference on agent taken when sending */
1824 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1825 wake_up(&mad_agent_priv->wait);
1826
1827 kfree(mad_send_wr);
1828}
1829
1830static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
1831 struct ib_wc *wc)
1832{
1833 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
1834 struct ib_mad_list_head *mad_list;
1835 struct ib_mad_qp_info *qp_info;
1836 struct ib_mad_queue *send_queue;
1837 struct ib_send_wr *bad_send_wr;
1838 unsigned long flags;
1839 int ret;
1840
1841 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1842 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
1843 mad_list);
1844 send_queue = mad_list->mad_queue;
1845 qp_info = send_queue->qp_info;
1846
1847retry:
1848 queued_send_wr = NULL;
1849 spin_lock_irqsave(&send_queue->lock, flags);
1850 list_del(&mad_list->list);
1851
1852 /* Move queued send to the send queue */
1853 if (send_queue->count-- > send_queue->max_active) {
1854 mad_list = container_of(qp_info->overflow_list.next,
1855 struct ib_mad_list_head, list);
1856 queued_send_wr = container_of(mad_list,
1857 struct ib_mad_send_wr_private,
1858 mad_list);
1859 list_del(&mad_list->list);
1860 list_add_tail(&mad_list->list, &send_queue->list);
1861 }
1862 spin_unlock_irqrestore(&send_queue->lock, flags);
1863
1864 /* Restore client wr_id in WC and complete send */
1865 wc->wr_id = mad_send_wr->wr_id;
1866 if (atomic_read(&qp_info->snoop_count))
1867 snoop_send(qp_info, &mad_send_wr->send_wr,
1868 (struct ib_mad_send_wc *)wc,
1869 IB_MAD_SNOOP_SEND_COMPLETIONS);
1870 ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc);
1871
1872 if (queued_send_wr) {
1873 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
1874 &bad_send_wr);
1875 if (ret) {
1876 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
1877 mad_send_wr = queued_send_wr;
1878 wc->status = IB_WC_LOC_QP_OP_ERR;
1879 goto retry;
1880 }
1881 }
1882}
1883
1884static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
1885{
1886 struct ib_mad_send_wr_private *mad_send_wr;
1887 struct ib_mad_list_head *mad_list;
1888 unsigned long flags;
1889
1890 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1891 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
1892 mad_send_wr = container_of(mad_list,
1893 struct ib_mad_send_wr_private,
1894 mad_list);
1895 mad_send_wr->retry = 1;
1896 }
1897 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1898}
1899
1900static void mad_error_handler(struct ib_mad_port_private *port_priv,
1901 struct ib_wc *wc)
1902{
1903 struct ib_mad_list_head *mad_list;
1904 struct ib_mad_qp_info *qp_info;
1905 struct ib_mad_send_wr_private *mad_send_wr;
1906 int ret;
1907
1908 /* Determine if failure was a send or receive */
1909 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1910 qp_info = mad_list->mad_queue->qp_info;
1911 if (mad_list->mad_queue == &qp_info->recv_queue)
1912 /*
1913 * Receive errors indicate that the QP has entered the error
1914		 * state - error handling/shutdown code will clean up
1915 */
1916 return;
1917
1918 /*
1919 * Send errors will transition the QP to SQE - move
1920 * QP to RTS and repost flushed work requests
1921 */
1922 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
1923 mad_list);
1924 if (wc->status == IB_WC_WR_FLUSH_ERR) {
1925 if (mad_send_wr->retry) {
1926 /* Repost send */
1927 struct ib_send_wr *bad_send_wr;
1928
1929 mad_send_wr->retry = 0;
1930 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
1931 &bad_send_wr);
1932 if (ret)
1933 ib_mad_send_done_handler(port_priv, wc);
1934 } else
1935 ib_mad_send_done_handler(port_priv, wc);
1936 } else {
1937 struct ib_qp_attr *attr;
1938
1939 /* Transition QP to RTS and fail offending send */
1940 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1941 if (attr) {
1942 attr->qp_state = IB_QPS_RTS;
1943 attr->cur_qp_state = IB_QPS_SQE;
1944 ret = ib_modify_qp(qp_info->qp, attr,
1945 IB_QP_STATE | IB_QP_CUR_STATE);
1946 kfree(attr);
1947 if (ret)
1948 printk(KERN_ERR PFX "mad_error_handler - "
1949 "ib_modify_qp to RTS : %d\n", ret);
1950 else
1951 mark_sends_for_retry(qp_info);
1952 }
1953 ib_mad_send_done_handler(port_priv, wc);
1954 }
1955}
1956
1957/*
1958 * IB MAD completion callback
1959 */
1960static void ib_mad_completion_handler(void *data)
1961{
1962 struct ib_mad_port_private *port_priv;
1963 struct ib_wc wc;
1964
1965 port_priv = (struct ib_mad_port_private *)data;
1966 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
1967
1968 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
1969 if (wc.status == IB_WC_SUCCESS) {
1970 switch (wc.opcode) {
1971 case IB_WC_SEND:
1972 ib_mad_send_done_handler(port_priv, &wc);
1973 break;
1974 case IB_WC_RECV:
1975 ib_mad_recv_done_handler(port_priv, &wc);
1976 break;
1977 default:
1978 BUG_ON(1);
1979 break;
1980 }
1981 } else
1982 mad_error_handler(port_priv, &wc);
1983 }
1984}
1985
1986static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
1987{
1988 unsigned long flags;
1989 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
1990 struct ib_mad_send_wc mad_send_wc;
1991 struct list_head cancel_list;
1992
1993 INIT_LIST_HEAD(&cancel_list);
1994
1995 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1996 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
1997 &mad_agent_priv->send_list, agent_list) {
1998 if (mad_send_wr->status == IB_WC_SUCCESS) {
1999 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2000 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2001 }
2002 }
2003
2004 /* Empty wait list to prevent receives from finding a request */
2005 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2006 /* Empty local completion list as well */
2007 list_splice_init(&mad_agent_priv->local_list, &cancel_list);
2008 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2009
2010 /* Report all cancelled requests */
2011 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2012 mad_send_wc.vendor_err = 0;
2013
2014 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2015 &cancel_list, agent_list) {
2016 mad_send_wc.wr_id = mad_send_wr->wr_id;
2017 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2018 &mad_send_wc);
2019
2020 list_del(&mad_send_wr->agent_list);
2021 kfree(mad_send_wr);
2022 atomic_dec(&mad_agent_priv->refcount);
2023 }
2024}
2025
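/*
 * Look up a send by its work request ID, checking the wait list first
 * (where a request sits once its send completion has been processed and
 * a response is awaited) and then the active send list.  The caller must
 * hold mad_agent_priv->lock.
 */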
2026static struct ib_mad_send_wr_private*
cabe3cbc 2027find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id)
2028{
2029 struct ib_mad_send_wr_private *mad_send_wr;
2030
2031 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2032 agent_list) {
2033 if (mad_send_wr->wr_id == wr_id)
2034 return mad_send_wr;
2035 }
2036
2037 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2038 agent_list) {
2039 if (mad_send_wr->wr_id == wr_id)
2040 return mad_send_wr;
2041 }
2042 return NULL;
2043}
2044
03b61ad2 2045int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
2046{
2047 struct ib_mad_agent_private *mad_agent_priv;
2048 struct ib_mad_send_wr_private *mad_send_wr;
2049 unsigned long flags;
cabe3cbc 2050 int active;
2051
2052 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2053 agent);
2054 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2055 mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id);
03b61ad2 2056 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
1da177e4 2057 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
03b61ad2 2058 return -EINVAL;
2059 }
2060
cabe3cbc 2061 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
03b61ad2 2062 if (!timeout_ms) {
1da177e4 2063 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
03b61ad2 2064 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2065 }
2066
03b61ad2 2067 mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms;
cabe3cbc 2068 if (active)
2069 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2070 else
2071 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2072
1da177e4 2073 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2074 return 0;
2075}
2076EXPORT_SYMBOL(ib_modify_mad);
1da177e4 2077
2078void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id)
2079{
2080 ib_modify_mad(mad_agent, wr_id, 0);
2081}
2082EXPORT_SYMBOL(ib_cancel_mad);
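
/*
 * Usage sketch for the two exported calls above (the caller below is
 * hypothetical and for illustration only): a client that does not want
 * to wait the full period for a response can shorten the timeout with
 * ib_modify_mad(), or cancel the send outright with ib_cancel_mad(),
 * which is shorthand for a zero timeout.
 */
static inline void example_shorten_mad_timeout(struct ib_mad_agent *agent,
					       u64 wr_id)
{
	/* Give the outstanding request one more second to complete */
	if (ib_modify_mad(agent, wr_id, 1000))
		printk(KERN_DEBUG PFX "send %llu already completed\n",
		       (unsigned long long) wr_id);
}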
2083
2084static void local_completions(void *data)
2085{
2086 struct ib_mad_agent_private *mad_agent_priv;
2087 struct ib_mad_local_private *local;
2088 struct ib_mad_agent_private *recv_mad_agent;
2089 unsigned long flags;
2c153b93 2090 int recv = 0;
2091 struct ib_wc wc;
2092 struct ib_mad_send_wc mad_send_wc;
2093
2094 mad_agent_priv = (struct ib_mad_agent_private *)data;
2095
2096 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2097 while (!list_empty(&mad_agent_priv->local_list)) {
2098 local = list_entry(mad_agent_priv->local_list.next,
2099 struct ib_mad_local_private,
2100 completion_list);
2101 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2102 if (local->mad_priv) {
2103 recv_mad_agent = local->recv_mad_agent;
2104 if (!recv_mad_agent) {
2105 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2106 goto local_send_completion;
2107 }
2108
2c153b93 2109 recv = 1;
2110 /*
2111 * Defined behavior is to complete response
2112 * before request
2113 */
2114 build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
2115 0 /* pkey index */,
2116 recv_mad_agent->agent.port_num, &wc);
2117
2118 local->mad_priv->header.recv_wc.wc = &wc;
2119 local->mad_priv->header.recv_wc.mad_len =
2120 sizeof(struct ib_mad);
2121 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.recv_buf.list);
2122 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2123 local->mad_priv->header.recv_wc.recv_buf.mad =
2124 &local->mad_priv->mad.mad;
2125 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2126 snoop_recv(recv_mad_agent->qp_info,
2127 &local->mad_priv->header.recv_wc,
2128 IB_MAD_SNOOP_RECVS);
2129 recv_mad_agent->agent.recv_handler(
2130 &recv_mad_agent->agent,
2131 &local->mad_priv->header.recv_wc);
2132 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2133 atomic_dec(&recv_mad_agent->refcount);
2134 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2135 }
2136
2137local_send_completion:
2138 /* Complete send */
2139 mad_send_wc.status = IB_WC_SUCCESS;
2140 mad_send_wc.vendor_err = 0;
2141 mad_send_wc.wr_id = local->wr_id;
2142 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2143 snoop_send(mad_agent_priv->qp_info, &local->send_wr,
2144 &mad_send_wc,
2145 IB_MAD_SNOOP_SEND_COMPLETIONS);
2146 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2147 &mad_send_wc);
2148
2149 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2150 list_del(&local->completion_list);
2151 atomic_dec(&mad_agent_priv->refcount);
2152 if (!recv)
2153 kmem_cache_free(ib_mad_cache, local->mad_priv);
2154 kfree(local);
2155 }
2156 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2157}
2158
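/*
 * Requeue a timed-out send if the client allowed retries: the work
 * request is reposted, gains a fresh reference, and goes back on the
 * active send list with its timeout restarted.
 */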
2159static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2160{
2161 int ret;
2162
2163 if (!mad_send_wr->retries--)
2164 return -ETIMEDOUT;
2165
2166 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_wr.
2167 wr.ud.timeout_ms);
2168
2169 ret = ib_send_mad(mad_send_wr);
2170
2171 if (!ret) {
2172 mad_send_wr->refcount++;
2173 list_add_tail(&mad_send_wr->agent_list,
2174 &mad_send_wr->mad_agent_priv->send_list);
2175 }
2176 return ret;
2177}
2178
2179static void timeout_sends(void *data)
2180{
2181 struct ib_mad_agent_private *mad_agent_priv;
2182 struct ib_mad_send_wr_private *mad_send_wr;
2183 struct ib_mad_send_wc mad_send_wc;
2184 unsigned long flags, delay;
2185
2186 mad_agent_priv = (struct ib_mad_agent_private *)data;
2187 mad_send_wc.vendor_err = 0;
2188
2189 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2190 while (!list_empty(&mad_agent_priv->wait_list)) {
2191 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2192 struct ib_mad_send_wr_private,
2193 agent_list);
2194
2195 if (time_after(mad_send_wr->timeout, jiffies)) {
2196 delay = mad_send_wr->timeout - jiffies;
2197 if ((long)delay <= 0)
2198 delay = 1;
2199 queue_delayed_work(mad_agent_priv->qp_info->
2200 port_priv->wq,
2201 &mad_agent_priv->timed_work, delay);
2202 break;
2203 }
2204
dbf9227b 2205 list_del(&mad_send_wr->agent_list);
2206 if (mad_send_wr->status == IB_WC_SUCCESS &&
2207 !retry_send(mad_send_wr))
2208 continue;
2209
2210 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2211
2212 if (mad_send_wr->status == IB_WC_SUCCESS)
2213 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2214 else
2215 mad_send_wc.status = mad_send_wr->status;
2216 mad_send_wc.wr_id = mad_send_wr->wr_id;
2217 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2218 &mad_send_wc);
2219
2220 kfree(mad_send_wr);
2221 atomic_dec(&mad_agent_priv->refcount);
2222 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2223 }
2224 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2225}
2226
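/*
 * CQ event handler: completions are not processed in interrupt context;
 * they are bounced to the per-port single-threaded workqueue, which runs
 * ib_mad_completion_handler() above.
 */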
2227static void ib_mad_thread_completion_handler(struct ib_cq *cq)
2228{
2229 struct ib_mad_port_private *port_priv = cq->cq_context;
2230
2231 queue_work(port_priv->wq, &port_priv->work);
2232}
2233
2234/*
2235 * Allocate receive MADs and post receive WRs for them
2236 */
2237static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2238 struct ib_mad_private *mad)
2239{
2240 unsigned long flags;
2241 int post, ret;
2242 struct ib_mad_private *mad_priv;
2243 struct ib_sge sg_list;
2244 struct ib_recv_wr recv_wr, *bad_recv_wr;
2245 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2246
2247 /* Initialize common scatter list fields */
2248 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2249 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2250
2251 /* Initialize common receive WR fields */
2252 recv_wr.next = NULL;
2253 recv_wr.sg_list = &sg_list;
2254 recv_wr.num_sge = 1;
2255
2256 do {
2257 /* Allocate and map receive buffer */
2258 if (mad) {
2259 mad_priv = mad;
2260 mad = NULL;
2261 } else {
2262 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2263 if (!mad_priv) {
2264 printk(KERN_ERR PFX "No memory for receive buffer\n");
2265 ret = -ENOMEM;
2266 break;
2267 }
2268 }
2269 sg_list.addr = dma_map_single(qp_info->port_priv->
2270 device->dma_device,
2271 &mad_priv->grh,
2272 sizeof *mad_priv -
2273 sizeof mad_priv->header,
2274 DMA_FROM_DEVICE);
2275 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
2276 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2277 mad_priv->header.mad_list.mad_queue = recv_queue;
2278
2279 /* Post receive WR */
2280 spin_lock_irqsave(&recv_queue->lock, flags);
2281 post = (++recv_queue->count < recv_queue->max_active);
2282 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2283 spin_unlock_irqrestore(&recv_queue->lock, flags);
2284 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2285 if (ret) {
2286 spin_lock_irqsave(&recv_queue->lock, flags);
2287 list_del(&mad_priv->header.mad_list.list);
2288 recv_queue->count--;
2289 spin_unlock_irqrestore(&recv_queue->lock, flags);
2290 dma_unmap_single(qp_info->port_priv->device->dma_device,
2291 pci_unmap_addr(&mad_priv->header,
2292 mapping),
2293 sizeof *mad_priv -
2294 sizeof mad_priv->header,
2295 DMA_FROM_DEVICE);
2296 kmem_cache_free(ib_mad_cache, mad_priv);
2297 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2298 break;
2299 }
2300 } while (post);
2301
2302 return ret;
2303}
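
/*
 * Receive buffer layout assumed above (a sketch, ignoring any padding):
 * only the part of ib_mad_private that follows the private header -- the
 * 40-byte GRH and the 256-byte MAD itself -- is DMA-mapped and described
 * by the single scatter/gather entry posted with each receive.
 */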
2304
2305/*
2306 * Return all the posted receive MADs
2307 */
2308static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2309{
2310 struct ib_mad_private_header *mad_priv_hdr;
2311 struct ib_mad_private *recv;
2312 struct ib_mad_list_head *mad_list;
2313
2314 while (!list_empty(&qp_info->recv_queue.list)) {
2315
2316 mad_list = list_entry(qp_info->recv_queue.list.next,
2317 struct ib_mad_list_head, list);
2318 mad_priv_hdr = container_of(mad_list,
2319 struct ib_mad_private_header,
2320 mad_list);
2321 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2322 header);
2323
2324 /* Remove from posted receive MAD list */
2325 list_del(&mad_list->list);
2326
2327 dma_unmap_single(qp_info->port_priv->device->dma_device,
2328 pci_unmap_addr(&recv->header, mapping),
2329 sizeof(struct ib_mad_private) -
2330 sizeof(struct ib_mad_private_header),
2331 DMA_FROM_DEVICE);
2332 kmem_cache_free(ib_mad_cache, recv);
2333 }
2334
2335 qp_info->recv_queue.count = 0;
2336}
2337
2338/*
2339 * Start the port
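 * Bring both special QPs (QP0/SMI and QP1/GSI) through the
 * INIT -> RTR -> RTS transitions, arm the completion queue, and
 * pre-post the receive work requests.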
2340 */
2341static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2342{
2343 int ret, i;
2344 struct ib_qp_attr *attr;
2345 struct ib_qp *qp;
2346
2347 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2348 if (!attr) {
2349 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2350 return -ENOMEM;
2351 }
2352
2353 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2354 qp = port_priv->qp_info[i].qp;
2355 /*
2356 * PKey index for QP1 is irrelevant but
2357 * one is needed for the Reset to Init transition
2358 */
2359 attr->qp_state = IB_QPS_INIT;
2360 attr->pkey_index = 0;
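		/* QP0 uses a QKey of 0; QP1 uses the well-known GSI QKey */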
2361 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2362 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2363 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2364 if (ret) {
2365 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2366 "INIT: %d\n", i, ret);
2367 goto out;
2368 }
2369
2370 attr->qp_state = IB_QPS_RTR;
2371 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2372 if (ret) {
2373 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2374 "RTR: %d\n", i, ret);
2375 goto out;
2376 }
2377
2378 attr->qp_state = IB_QPS_RTS;
2379 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2380 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2381 if (ret) {
2382 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2383 "RTS: %d\n", i, ret);
2384 goto out;
2385 }
2386 }
2387
2388 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2389 if (ret) {
2390 printk(KERN_ERR PFX "Failed to request completion "
2391 "notification: %d\n", ret);
2392 goto out;
2393 }
2394
2395 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2396 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2397 if (ret) {
2398 printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2399 goto out;
2400 }
2401 }
2402out:
2403 kfree(attr);
2404 return ret;
2405}
2406
2407static void qp_event_handler(struct ib_event *event, void *qp_context)
2408{
2409 struct ib_mad_qp_info *qp_info = qp_context;
2410
2411 /* It's worse than that! He's dead, Jim! */
2412 printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
2413 event->event, qp_info->qp->qp_num);
2414}
2415
2416static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2417 struct ib_mad_queue *mad_queue)
2418{
2419 mad_queue->qp_info = qp_info;
2420 mad_queue->count = 0;
2421 spin_lock_init(&mad_queue->lock);
2422 INIT_LIST_HEAD(&mad_queue->list);
2423}
2424
2425static void init_mad_qp(struct ib_mad_port_private *port_priv,
2426 struct ib_mad_qp_info *qp_info)
2427{
2428 qp_info->port_priv = port_priv;
2429 init_mad_queue(qp_info, &qp_info->send_queue);
2430 init_mad_queue(qp_info, &qp_info->recv_queue);
2431 INIT_LIST_HEAD(&qp_info->overflow_list);
2432 spin_lock_init(&qp_info->snoop_lock);
2433 qp_info->snoop_table = NULL;
2434 qp_info->snoop_table_size = 0;
2435 atomic_set(&qp_info->snoop_count, 0);
2436}
2437
2438static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2439 enum ib_qp_type qp_type)
2440{
2441 struct ib_qp_init_attr qp_init_attr;
2442 int ret;
2443
2444 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2445 qp_init_attr.send_cq = qp_info->port_priv->cq;
2446 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2447 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2448 qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
2449 qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
2450 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2451 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2452 qp_init_attr.qp_type = qp_type;
2453 qp_init_attr.port_num = qp_info->port_priv->port_num;
2454 qp_init_attr.qp_context = qp_info;
2455 qp_init_attr.event_handler = qp_event_handler;
2456 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2457 if (IS_ERR(qp_info->qp)) {
2458 printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
2459 get_spl_qp_index(qp_type));
2460 ret = PTR_ERR(qp_info->qp);
2461 goto error;
2462 }
2463 /* Use minimum queue sizes unless the CQ is resized */
2464 qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
2465 qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
2466 return 0;
2467
2468error:
2469 return ret;
2470}
2471
2472static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2473{
2474 ib_destroy_qp(qp_info->qp);
2475 if (qp_info->snoop_table)
2476 kfree(qp_info->snoop_table);
2477}
2478
2479/*
2480 * Open the port
2481 * Create the QP, PD, MR, and CQ if needed
2482 */
2483static int ib_mad_port_open(struct ib_device *device,
2484 int port_num)
2485{
2486 int ret, cq_size;
2487 struct ib_mad_port_private *port_priv;
2488 unsigned long flags;
2489 char name[sizeof "ib_mad123"];
2490
2491 /* Create new device info */
2492 port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
2493 if (!port_priv) {
2494 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2495 return -ENOMEM;
2496 }
2497 memset(port_priv, 0, sizeof *port_priv);
2498 port_priv->device = device;
2499 port_priv->port_num = port_num;
2500 spin_lock_init(&port_priv->reg_lock);
2501 INIT_LIST_HEAD(&port_priv->agent_list);
2502 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2503 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2504
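	/*
	 * Both special QPs share this CQ for their sends and receives,
	 * hence the factor of two.
	 */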
2505 cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
2506 port_priv->cq = ib_create_cq(port_priv->device,
2507 (ib_comp_handler)
2508 ib_mad_thread_completion_handler,
2509 NULL, port_priv, cq_size);
2510 if (IS_ERR(port_priv->cq)) {
2511 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2512 ret = PTR_ERR(port_priv->cq);
2513 goto error3;
2514 }
2515
2516 port_priv->pd = ib_alloc_pd(device);
2517 if (IS_ERR(port_priv->pd)) {
2518 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2519 ret = PTR_ERR(port_priv->pd);
2520 goto error4;
2521 }
2522
2523 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2524 if (IS_ERR(port_priv->mr)) {
2525 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2526 ret = PTR_ERR(port_priv->mr);
2527 goto error5;
2528 }
2529
2530 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2531 if (ret)
2532 goto error6;
2533 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2534 if (ret)
2535 goto error7;
2536
2537 snprintf(name, sizeof name, "ib_mad%d", port_num);
2538 port_priv->wq = create_singlethread_workqueue(name);
2539 if (!port_priv->wq) {
2540 ret = -ENOMEM;
2541 goto error8;
2542 }
2543 INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
2544
2545 ret = ib_mad_port_start(port_priv);
2546 if (ret) {
2547 printk(KERN_ERR PFX "Couldn't start port\n");
2548 goto error9;
2549 }
2550
2551 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2552 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2553 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2554 return 0;
2555
2556error9:
2557 destroy_workqueue(port_priv->wq);
2558error8:
2559 destroy_mad_qp(&port_priv->qp_info[1]);
2560error7:
2561 destroy_mad_qp(&port_priv->qp_info[0]);
2562error6:
2563 ib_dereg_mr(port_priv->mr);
2564error5:
2565 ib_dealloc_pd(port_priv->pd);
2566error4:
2567 ib_destroy_cq(port_priv->cq);
2568 cleanup_recv_queue(&port_priv->qp_info[1]);
2569 cleanup_recv_queue(&port_priv->qp_info[0]);
2570error3:
2571 kfree(port_priv);
2572
2573 return ret;
2574}
2575
2576/*
2577 * Close the port
2578 * If there are no classes using the port, free the port
2579 * resources (CQ, MR, PD, QP) and remove the port's info structure
2580 */
2581static int ib_mad_port_close(struct ib_device *device, int port_num)
2582{
2583 struct ib_mad_port_private *port_priv;
2584 unsigned long flags;
2585
2586 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2587 port_priv = __ib_get_mad_port(device, port_num);
2588 if (port_priv == NULL) {
2589 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2590 printk(KERN_ERR PFX "Port %d not found\n", port_num);
2591 return -ENODEV;
2592 }
2593 list_del(&port_priv->port_list);
2594 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2595
2596 /* Stop processing completions. */
2597 flush_workqueue(port_priv->wq);
2598 destroy_workqueue(port_priv->wq);
2599 destroy_mad_qp(&port_priv->qp_info[1]);
2600 destroy_mad_qp(&port_priv->qp_info[0]);
2601 ib_dereg_mr(port_priv->mr);
2602 ib_dealloc_pd(port_priv->pd);
2603 ib_destroy_cq(port_priv->cq);
2604 cleanup_recv_queue(&port_priv->qp_info[1]);
2605 cleanup_recv_queue(&port_priv->qp_info[0]);
2606 /* XXX: Handle deallocation of MAD registration tables */
2607
2608 kfree(port_priv);
2609
2610 return 0;
2611}
2612
2613static void ib_mad_init_device(struct ib_device *device)
2614{
f68bcc2d 2615 int num_ports, cur_port, i;
2616
2617 if (device->node_type == IB_NODE_SWITCH) {
2618 num_ports = 1;
2619 cur_port = 0;
2620 } else {
2621 num_ports = device->phys_port_cnt;
2622 cur_port = 1;
2623 }
2624 for (i = 0; i < num_ports; i++, cur_port++) {
f68bcc2d 2625 if (ib_mad_port_open(device, cur_port)) {
2626 printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2627 device->name, cur_port);
2628 goto error_device_open;
2629 }
f68bcc2d 2630 if (ib_agent_port_open(device, cur_port)) {
2631 printk(KERN_ERR PFX "Couldn't open %s port %d "
2632 "for agents\n",
2633 device->name, cur_port);
2634 goto error_device_open;
2635 }
2636 }
f68bcc2d 2637 return;
2638
2639error_device_open:
2640 while (i > 0) {
2641 cur_port--;
f68bcc2d 2642 if (ib_agent_port_close(device, cur_port))
2643 printk(KERN_ERR PFX "Couldn't close %s port %d "
2644 "for agents\n",
2645 device->name, cur_port);
f68bcc2d 2646 if (ib_mad_port_close(device, cur_port))
2647 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2648 device->name, cur_port);
2649 i--;
2650 }
2651}
2652
2653static void ib_mad_remove_device(struct ib_device *device)
2654{
f68bcc2d 2655 int i, num_ports, cur_port;
2656
2657 if (device->node_type == IB_NODE_SWITCH) {
2658 num_ports = 1;
2659 cur_port = 0;
2660 } else {
2661 num_ports = device->phys_port_cnt;
2662 cur_port = 1;
2663 }
2664 for (i = 0; i < num_ports; i++, cur_port++) {
f68bcc2d 2665 if (ib_agent_port_close(device, cur_port))
2666 printk(KERN_ERR PFX "Couldn't close %s port %d "
2667 "for agents\n",
2668 device->name, cur_port);
f68bcc2d 2669 if (ib_mad_port_close(device, cur_port))
2670 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2671 device->name, cur_port);
2672 }
2673}
2674
2675static struct ib_client mad_client = {
2676 .name = "mad",
2677 .add = ib_mad_init_device,
2678 .remove = ib_mad_remove_device
2679};
2680
2681static int __init ib_mad_init_module(void)
2682{
2683 int ret;
2684
2685 spin_lock_init(&ib_mad_port_list_lock);
2686 spin_lock_init(&ib_agent_port_list_lock);
2687
2688 ib_mad_cache = kmem_cache_create("ib_mad",
2689 sizeof(struct ib_mad_private),
2690 0,
2691 SLAB_HWCACHE_ALIGN,
2692 NULL,
2693 NULL);
2694 if (!ib_mad_cache) {
2695 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
2696 ret = -ENOMEM;
2697 goto error1;
2698 }
2699
2700 INIT_LIST_HEAD(&ib_mad_port_list);
2701
2702 if (ib_register_client(&mad_client)) {
2703 printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
2704 ret = -EINVAL;
2705 goto error2;
2706 }
2707
2708 return 0;
2709
2710error2:
2711 kmem_cache_destroy(ib_mad_cache);
2712error1:
2713 return ret;
2714}
2715
2716static void __exit ib_mad_cleanup_module(void)
2717{
2718 ib_unregister_client(&mad_client);
2719
2720 if (kmem_cache_destroy(ib_mad_cache)) {
2721 printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
2722 }
2723}
2724
2725module_init(ib_mad_init_module);
2726module_exit(ib_mad_cleanup_module);