Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
de493d47 | 2 | * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved. |
fa619a77 HR |
3 | * Copyright (c) 2005 Intel Corporation. All rights reserved. |
4 | * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. | |
b76aabc3 | 5 | * Copyright (c) 2009 HNR Consulting. All rights reserved. |
1da177e4 LT |
6 | * |
7 | * This software is available to you under a choice of one of two | |
8 | * licenses. You may choose to be licensed under the terms of the GNU | |
9 | * General Public License (GPL) Version 2, available from the file | |
10 | * COPYING in the main directory of this source tree, or the | |
11 | * OpenIB.org BSD license below: | |
12 | * | |
13 | * Redistribution and use in source and binary forms, with or | |
14 | * without modification, are permitted provided that the following | |
15 | * conditions are met: | |
16 | * | |
17 | * - Redistributions of source code must retain the above | |
18 | * copyright notice, this list of conditions and the following | |
19 | * disclaimer. | |
20 | * | |
21 | * - Redistributions in binary form must reproduce the above | |
22 | * copyright notice, this list of conditions and the following | |
23 | * disclaimer in the documentation and/or other materials | |
24 | * provided with the distribution. | |
25 | * | |
26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
33 | * SOFTWARE. | |
34 | * | |
1da177e4 | 35 | */ |
7ef5d4b0 IW |
36 | |
37 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
38 | ||
1da177e4 | 39 | #include <linux/dma-mapping.h> |
5a0e3ad6 | 40 | #include <linux/slab.h> |
e4dd23d7 | 41 | #include <linux/module.h> |
9874e746 | 42 | #include <rdma/ib_cache.h> |
1da177e4 LT |
43 | |
44 | #include "mad_priv.h" | |
fa619a77 | 45 | #include "mad_rmpp.h" |
1da177e4 LT |
46 | #include "smi.h" |
47 | #include "agent.h" | |
48 | ||
49 | MODULE_LICENSE("Dual BSD/GPL"); | |
50 | MODULE_DESCRIPTION("kernel IB MAD API"); | |
51 | MODULE_AUTHOR("Hal Rosenstock"); | |
52 | MODULE_AUTHOR("Sean Hefty"); | |
53 | ||
16933955 RD |
54 | static int mad_sendq_size = IB_MAD_QP_SEND_SIZE; |
55 | static int mad_recvq_size = IB_MAD_QP_RECV_SIZE; | |
b76aabc3 HR |
56 | |
57 | module_param_named(send_queue_size, mad_sendq_size, int, 0444); | |
58 | MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests"); | |
59 | module_param_named(recv_queue_size, mad_recvq_size, int, 0444); | |
60 | MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests"); | |
61 | ||
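Both parameters can be overridden at module load time, for example (illustrative) modprobe ib_mad send_queue_size=256 recv_queue_size=512; the 0444 permissions make the values visible read-only under /sys/module/ib_mad/parameters/ but not changeable at runtime.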
e54f8188 | 62 | static struct kmem_cache *ib_mad_cache; |
fa619a77 | 63 | |
1da177e4 LT |
64 | static struct list_head ib_mad_port_list; |
65 | static u32 ib_mad_client_id = 0; | |
66 | ||
67 | /* Port list lock */ | |
6276e08a | 68 | static DEFINE_SPINLOCK(ib_mad_port_list_lock); |
1da177e4 LT |
69 | |
70 | /* Forward declarations */ | |
71 | static int method_in_use(struct ib_mad_mgmt_method_table **method, | |
72 | struct ib_mad_reg_req *mad_reg_req); | |
73 | static void remove_mad_reg_req(struct ib_mad_agent_private *priv); | |
74 | static struct ib_mad_agent_private *find_mad_agent( | |
75 | struct ib_mad_port_private *port_priv, | |
4a0754fa | 76 | struct ib_mad *mad); |
1da177e4 LT |
77 | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, |
78 | struct ib_mad_private *mad); | |
79 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); | |
c4028958 DH |
80 | static void timeout_sends(struct work_struct *work); |
81 | static void local_completions(struct work_struct *work); | |
1da177e4 LT |
82 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, |
83 | struct ib_mad_agent_private *agent_priv, | |
84 | u8 mgmt_class); | |
85 | static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, | |
86 | struct ib_mad_agent_private *agent_priv); | |
87 | ||
88 | /* | |
89 | * Returns an ib_mad_port_private structure or NULL for a device/port |
90 | * Assumes ib_mad_port_list_lock is being held | |
91 | */ | |
92 | static inline struct ib_mad_port_private * | |
93 | __ib_get_mad_port(struct ib_device *device, int port_num) | |
94 | { | |
95 | struct ib_mad_port_private *entry; | |
96 | ||
97 | list_for_each_entry(entry, &ib_mad_port_list, port_list) { | |
98 | if (entry->device == device && entry->port_num == port_num) | |
99 | return entry; | |
100 | } | |
101 | return NULL; | |
102 | } | |
103 | ||
104 | /* | |
105 | * Wrapper function to return an ib_mad_port_private structure or NULL |
106 | * for a device/port | |
107 | */ | |
108 | static inline struct ib_mad_port_private * | |
109 | ib_get_mad_port(struct ib_device *device, int port_num) | |
110 | { | |
111 | struct ib_mad_port_private *entry; | |
112 | unsigned long flags; | |
113 | ||
114 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); | |
115 | entry = __ib_get_mad_port(device, port_num); | |
116 | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | |
117 | ||
118 | return entry; | |
119 | } | |
120 | ||
121 | static inline u8 convert_mgmt_class(u8 mgmt_class) | |
122 | { | |
123 | /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */ | |
124 | return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ? | |
125 | 0 : mgmt_class; | |
126 | } | |
127 | ||
128 | static int get_spl_qp_index(enum ib_qp_type qp_type) | |
129 | { | |
130 | switch (qp_type) | |
131 | { | |
132 | case IB_QPT_SMI: | |
133 | return 0; | |
134 | case IB_QPT_GSI: | |
135 | return 1; | |
136 | default: | |
137 | return -1; | |
138 | } | |
139 | } | |
140 | ||
141 | static int vendor_class_index(u8 mgmt_class) | |
142 | { | |
143 | return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START; | |
144 | } | |
145 | ||
146 | static int is_vendor_class(u8 mgmt_class) | |
147 | { | |
148 | if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) || | |
149 | (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END)) | |
150 | return 0; | |
151 | return 1; | |
152 | } | |
153 | ||
154 | static int is_vendor_oui(char *oui) | |
155 | { | |
156 | if (oui[0] || oui[1] || oui[2]) | |
157 | return 1; | |
158 | return 0; | |
159 | } | |
160 | ||
161 | static int is_vendor_method_in_use( | |
162 | struct ib_mad_mgmt_vendor_class *vendor_class, | |
163 | struct ib_mad_reg_req *mad_reg_req) | |
164 | { | |
165 | struct ib_mad_mgmt_method_table *method; | |
166 | int i; | |
167 | ||
168 | for (i = 0; i < MAX_MGMT_OUI; i++) { | |
169 | if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) { | |
170 | method = vendor_class->method_table[i]; | |
171 | if (method) { | |
172 | if (method_in_use(&method, mad_reg_req)) | |
173 | return 1; | |
174 | else | |
175 | break; | |
176 | } | |
177 | } | |
178 | } | |
179 | return 0; | |
180 | } | |
181 | ||
96909308 | 182 | int ib_response_mad(const struct ib_mad_hdr *hdr) |
2527e681 | 183 | { |
96909308 IW |
184 | return ((hdr->method & IB_MGMT_METHOD_RESP) || |
185 | (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) || | |
186 | ((hdr->mgmt_class == IB_MGMT_CLASS_BM) && | |
187 | (hdr->attr_mod & IB_BM_ATTR_MOD_RESP))); | |
2527e681 SH |
188 | } |
189 | EXPORT_SYMBOL(ib_response_mad); | |
190 | ||
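This predicate classifies a MAD purely from its header: the R bit of the method, the TrapRepress method, or, for the Baseboard Management class, the response bit of the attribute modifier. A hedged sketch of a client receive handler using it (the recv_buf/mad_hdr layout and the two-argument handler typedef follow ib_mad.h of this period; the handler itself is hypothetical):

/* Illustrative only: split responses from unsolicited requests */
static void my_recv_handler(struct ib_mad_agent *agent,
			    struct ib_mad_recv_wc *recv_wc)
{
	if (ib_response_mad(&recv_wc->recv_buf.mad->mad_hdr)) {
		/* match against an outstanding request, e.g. by TID */
	} else {
		/* handle a request aimed at our registered class */
	}
	ib_free_recv_mad(recv_wc);	/* hand the buffers back (see below) */
}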
1da177e4 LT |
191 | /* |
192 | * ib_register_mad_agent - Register to send/receive MADs | |
193 | */ | |
194 | struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |
195 | u8 port_num, | |
196 | enum ib_qp_type qp_type, | |
197 | struct ib_mad_reg_req *mad_reg_req, | |
198 | u8 rmpp_version, | |
199 | ib_mad_send_handler send_handler, | |
200 | ib_mad_recv_handler recv_handler, | |
0f29b46d IW |
201 | void *context, |
202 | u32 registration_flags) | |
1da177e4 LT |
203 | { |
204 | struct ib_mad_port_private *port_priv; | |
205 | struct ib_mad_agent *ret = ERR_PTR(-EINVAL); | |
206 | struct ib_mad_agent_private *mad_agent_priv; | |
207 | struct ib_mad_reg_req *reg_req = NULL; | |
208 | struct ib_mad_mgmt_class_table *class; | |
209 | struct ib_mad_mgmt_vendor_class_table *vendor; | |
210 | struct ib_mad_mgmt_vendor_class *vendor_class; | |
211 | struct ib_mad_mgmt_method_table *method; | |
212 | int ret2, qpn; | |
213 | unsigned long flags; | |
214 | u8 mgmt_class, vclass; | |
215 | ||
216 | /* Validate parameters */ | |
217 | qpn = get_spl_qp_index(qp_type); | |
9ad13a42 IW |
218 | if (qpn == -1) { |
219 | dev_notice(&device->dev, | |
220 | "ib_register_mad_agent: invalid QP Type %d\n", | |
221 | qp_type); | |
1da177e4 | 222 | goto error1; |
9ad13a42 | 223 | } |
1da177e4 | 224 | |
9ad13a42 IW |
225 | if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) { |
226 | dev_notice(&device->dev, | |
227 | "ib_register_mad_agent: invalid RMPP Version %u\n", | |
228 | rmpp_version); | |
fa619a77 | 229 | goto error1; |
9ad13a42 | 230 | } |
1da177e4 LT |
231 | |
232 | /* Validate MAD registration request if supplied */ | |
233 | if (mad_reg_req) { | |
9ad13a42 IW |
234 | if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) { |
235 | dev_notice(&device->dev, | |
236 | "ib_register_mad_agent: invalid Class Version %u\n", | |
237 | mad_reg_req->mgmt_class_version); | |
1da177e4 | 238 | goto error1; |
9ad13a42 IW |
239 | } |
240 | if (!recv_handler) { | |
241 | dev_notice(&device->dev, | |
242 | "ib_register_mad_agent: no recv_handler\n"); | |
1da177e4 | 243 | goto error1; |
9ad13a42 | 244 | } |
1da177e4 LT |
245 | if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) { |
246 | /* | |
247 | * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only | |
248 | * one in this range currently allowed | |
249 | */ | |
250 | if (mad_reg_req->mgmt_class != | |
9ad13a42 IW |
251 | IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { |
252 | dev_notice(&device->dev, | |
253 | "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n", | |
254 | mad_reg_req->mgmt_class); | |
1da177e4 | 255 | goto error1; |
9ad13a42 | 256 | } |
1da177e4 LT |
257 | } else if (mad_reg_req->mgmt_class == 0) { |
258 | /* | |
259 | * Class 0 is reserved in IBA and is used for | |
260 | * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE | |
261 | */ | |
9ad13a42 IW |
262 | dev_notice(&device->dev, |
263 | "ib_register_mad_agent: Invalid Mgmt Class 0\n"); | |
1da177e4 LT |
264 | goto error1; |
265 | } else if (is_vendor_class(mad_reg_req->mgmt_class)) { | |
266 | /* | |
267 | * If class is in "new" vendor range, | |
268 | * ensure supplied OUI is not zero | |
269 | */ | |
9ad13a42 IW |
270 | if (!is_vendor_oui(mad_reg_req->oui)) { |
271 | dev_notice(&device->dev, | |
272 | "ib_register_mad_agent: No OUI specified for class 0x%x\n", | |
273 | mad_reg_req->mgmt_class); | |
1da177e4 | 274 | goto error1; |
9ad13a42 | 275 | } |
1da177e4 | 276 | } |
618a3c03 | 277 | /* Make sure class supplied is consistent with RMPP */ |
64cb9c6a | 278 | if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { |
9ad13a42 IW |
279 | if (rmpp_version) { |
280 | dev_notice(&device->dev, | |
281 | "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n", | |
282 | mad_reg_req->mgmt_class); | |
618a3c03 | 283 | goto error1; |
9ad13a42 | 284 | } |
618a3c03 | 285 | } |
1471cb6c | 286 | |
1da177e4 LT |
287 | /* Make sure class supplied is consistent with QP type */ |
288 | if (qp_type == IB_QPT_SMI) { | |
289 | if ((mad_reg_req->mgmt_class != | |
290 | IB_MGMT_CLASS_SUBN_LID_ROUTED) && | |
291 | (mad_reg_req->mgmt_class != | |
9ad13a42 IW |
292 | IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { |
293 | dev_notice(&device->dev, | |
294 | "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n", | |
295 | mad_reg_req->mgmt_class); | |
1da177e4 | 296 | goto error1; |
9ad13a42 | 297 | } |
1da177e4 LT |
298 | } else { |
299 | if ((mad_reg_req->mgmt_class == | |
300 | IB_MGMT_CLASS_SUBN_LID_ROUTED) || | |
301 | (mad_reg_req->mgmt_class == | |
9ad13a42 IW |
302 | IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { |
303 | dev_notice(&device->dev, | |
304 | "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n", | |
305 | mad_reg_req->mgmt_class); | |
1da177e4 | 306 | goto error1; |
9ad13a42 | 307 | } |
1da177e4 LT |
308 | } |
309 | } else { | |
310 | /* No registration request supplied */ | |
311 | if (!send_handler) | |
312 | goto error1; | |
1471cb6c IW |
313 | if (registration_flags & IB_MAD_USER_RMPP) |
314 | goto error1; | |
1da177e4 LT |
315 | } |
316 | ||
317 | /* Validate device and port */ | |
318 | port_priv = ib_get_mad_port(device, port_num); | |
319 | if (!port_priv) { | |
9ad13a42 | 320 | dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n"); |
1da177e4 LT |
321 | ret = ERR_PTR(-ENODEV); |
322 | goto error1; | |
323 | } | |
324 | ||
c8367c4c IW |
325 | /* Verify the QP requested is supported. For example, Ethernet devices |
326 | * will not have QP0 */ | |
327 | if (!port_priv->qp_info[qpn].qp) { | |
9ad13a42 IW |
328 | dev_notice(&device->dev, |
329 | "ib_register_mad_agent: QP %d not supported\n", qpn); | |
c8367c4c IW |
330 | ret = ERR_PTR(-EPROTONOSUPPORT); |
331 | goto error1; | |
332 | } | |
333 | ||
1da177e4 | 334 | /* Allocate structures */ |
de6eb66b | 335 | mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL); |
1da177e4 LT |
336 | if (!mad_agent_priv) { |
337 | ret = ERR_PTR(-ENOMEM); | |
338 | goto error1; | |
339 | } | |
b82cab6b HR |
340 | |
341 | mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd, | |
342 | IB_ACCESS_LOCAL_WRITE); | |
343 | if (IS_ERR(mad_agent_priv->agent.mr)) { | |
344 | ret = ERR_PTR(-ENOMEM); | |
345 | goto error2; | |
346 | } | |
1da177e4 LT |
347 | |
348 | if (mad_reg_req) { | |
9893e742 | 349 | reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL); |
1da177e4 LT |
350 | if (!reg_req) { |
351 | ret = ERR_PTR(-ENOMEM); | |
b82cab6b | 352 | goto error3; |
1da177e4 | 353 | } |
1da177e4 LT |
354 | } |
355 | ||
356 | /* Now, fill in the various structures */ | |
1da177e4 LT |
357 | mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; |
358 | mad_agent_priv->reg_req = reg_req; | |
fa619a77 | 359 | mad_agent_priv->agent.rmpp_version = rmpp_version; |
1da177e4 LT |
360 | mad_agent_priv->agent.device = device; |
361 | mad_agent_priv->agent.recv_handler = recv_handler; | |
362 | mad_agent_priv->agent.send_handler = send_handler; | |
363 | mad_agent_priv->agent.context = context; | |
364 | mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; | |
365 | mad_agent_priv->agent.port_num = port_num; | |
0f29b46d | 366 | mad_agent_priv->agent.flags = registration_flags; |
d9620a4c RC |
367 | spin_lock_init(&mad_agent_priv->lock); |
368 | INIT_LIST_HEAD(&mad_agent_priv->send_list); | |
369 | INIT_LIST_HEAD(&mad_agent_priv->wait_list); | |
370 | INIT_LIST_HEAD(&mad_agent_priv->done_list); | |
371 | INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); | |
372 | INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends); | |
373 | INIT_LIST_HEAD(&mad_agent_priv->local_list); | |
374 | INIT_WORK(&mad_agent_priv->local_work, local_completions); | |
375 | atomic_set(&mad_agent_priv->refcount, 1); | |
376 | init_completion(&mad_agent_priv->comp); | |
1da177e4 LT |
377 | |
378 | spin_lock_irqsave(&port_priv->reg_lock, flags); | |
379 | mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; | |
380 | ||
381 | /* | |
382 | * Make sure MAD registration (if supplied) | |
383 | * does not overlap with any existing ones |
384 | */ | |
385 | if (mad_reg_req) { | |
386 | mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class); | |
387 | if (!is_vendor_class(mgmt_class)) { | |
388 | class = port_priv->version[mad_reg_req-> | |
389 | mgmt_class_version].class; | |
390 | if (class) { | |
391 | method = class->method_table[mgmt_class]; | |
392 | if (method) { | |
393 | if (method_in_use(&method, | |
394 | mad_reg_req)) | |
b82cab6b | 395 | goto error4; |
1da177e4 LT |
396 | } |
397 | } | |
398 | ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, | |
399 | mgmt_class); | |
400 | } else { | |
401 | /* "New" vendor class range */ | |
402 | vendor = port_priv->version[mad_reg_req-> | |
403 | mgmt_class_version].vendor; | |
404 | if (vendor) { | |
405 | vclass = vendor_class_index(mgmt_class); | |
406 | vendor_class = vendor->vendor_class[vclass]; | |
407 | if (vendor_class) { | |
408 | if (is_vendor_method_in_use( | |
409 | vendor_class, | |
410 | mad_reg_req)) | |
b82cab6b | 411 | goto error4; |
1da177e4 LT |
412 | } |
413 | } | |
414 | ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); | |
415 | } | |
416 | if (ret2) { | |
417 | ret = ERR_PTR(ret2); | |
b82cab6b | 418 | goto error4; |
1da177e4 LT |
419 | } |
420 | } | |
421 | ||
422 | /* Add mad agent into port's agent list */ | |
423 | list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list); | |
424 | spin_unlock_irqrestore(&port_priv->reg_lock, flags); | |
425 | ||
1da177e4 LT |
426 | return &mad_agent_priv->agent; |
427 | ||
b82cab6b | 428 | error4: |
1da177e4 LT |
429 | spin_unlock_irqrestore(&port_priv->reg_lock, flags); |
430 | kfree(reg_req); | |
b82cab6b | 431 | error3: |
b82cab6b | 432 | ib_dereg_mr(mad_agent_priv->agent.mr); |
2012a116 AB |
433 | error2: |
434 | kfree(mad_agent_priv); | |
1da177e4 LT |
435 | error1: |
436 | return ret; | |
437 | } | |
438 | EXPORT_SYMBOL(ib_register_mad_agent); | |
439 | ||
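For reference, a hedged sketch of how a kernel client might use this registration API. The ib_mad_reg_req fields (mgmt_class, mgmt_class_version, method_mask) and the handler typedefs follow ib_mad.h of this period; IB_MGMT_CLASS_PERF_MGMT and IB_MGMT_METHOD_GET are header constants used purely as an example class/method, and registration_flags of 0 means no special behavior (in particular, no IB_MAD_USER_RMPP):

/* Illustrative only: register a GSI agent for Get methods of one class */
static void my_send_done(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	ib_free_send_mad(send_wc->send_buf);
}

static void my_recv_done(struct ib_mad_agent *agent,
			 struct ib_mad_recv_wc *recv_wc)
{
	ib_free_recv_mad(recv_wc);
}

static struct ib_mad_agent *my_register(struct ib_device *device, u8 port)
{
	struct ib_mad_reg_req reg_req = {
		.mgmt_class	    = IB_MGMT_CLASS_PERF_MGMT,	/* example class */
		.mgmt_class_version = 1,
	};

	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);

	return ib_register_mad_agent(device, port, IB_QPT_GSI, &reg_req,
				     0, my_send_done, my_recv_done,
				     NULL, 0);
}

The matching teardown is a single ib_unregister_mad_agent() call, which cancels outstanding sends and waits for all references to drop, as implemented further down in this file.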
440 | static inline int is_snooping_sends(int mad_snoop_flags) | |
441 | { | |
442 | return (mad_snoop_flags & | |
443 | (/*IB_MAD_SNOOP_POSTED_SENDS | | |
444 | IB_MAD_SNOOP_RMPP_SENDS |*/ | |
445 | IB_MAD_SNOOP_SEND_COMPLETIONS /*| | |
446 | IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/)); | |
447 | } | |
448 | ||
449 | static inline int is_snooping_recvs(int mad_snoop_flags) | |
450 | { | |
451 | return (mad_snoop_flags & | |
452 | (IB_MAD_SNOOP_RECVS /*| | |
453 | IB_MAD_SNOOP_RMPP_RECVS*/)); | |
454 | } | |
455 | ||
456 | static int register_snoop_agent(struct ib_mad_qp_info *qp_info, | |
457 | struct ib_mad_snoop_private *mad_snoop_priv) | |
458 | { | |
459 | struct ib_mad_snoop_private **new_snoop_table; | |
460 | unsigned long flags; | |
461 | int i; | |
462 | ||
463 | spin_lock_irqsave(&qp_info->snoop_lock, flags); | |
464 | /* Check for empty slot in array. */ | |
465 | for (i = 0; i < qp_info->snoop_table_size; i++) | |
466 | if (!qp_info->snoop_table[i]) | |
467 | break; | |
468 | ||
469 | if (i == qp_info->snoop_table_size) { | |
470 | /* Grow table. */ | |
52805174 RD |
471 | new_snoop_table = krealloc(qp_info->snoop_table, |
472 | sizeof mad_snoop_priv * | |
473 | (qp_info->snoop_table_size + 1), | |
474 | GFP_ATOMIC); | |
1da177e4 LT |
475 | if (!new_snoop_table) { |
476 | i = -ENOMEM; | |
477 | goto out; | |
478 | } | |
52805174 | 479 | |
1da177e4 LT |
480 | qp_info->snoop_table = new_snoop_table; |
481 | qp_info->snoop_table_size++; | |
482 | } | |
483 | qp_info->snoop_table[i] = mad_snoop_priv; | |
484 | atomic_inc(&qp_info->snoop_count); | |
485 | out: | |
486 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | |
487 | return i; | |
488 | } | |
489 | ||
490 | struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, | |
491 | u8 port_num, | |
492 | enum ib_qp_type qp_type, | |
493 | int mad_snoop_flags, | |
494 | ib_mad_snoop_handler snoop_handler, | |
495 | ib_mad_recv_handler recv_handler, | |
496 | void *context) | |
497 | { | |
498 | struct ib_mad_port_private *port_priv; | |
499 | struct ib_mad_agent *ret; | |
500 | struct ib_mad_snoop_private *mad_snoop_priv; | |
501 | int qpn; | |
502 | ||
503 | /* Validate parameters */ | |
504 | if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) || | |
505 | (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) { | |
506 | ret = ERR_PTR(-EINVAL); | |
507 | goto error1; | |
508 | } | |
509 | qpn = get_spl_qp_index(qp_type); | |
510 | if (qpn == -1) { | |
511 | ret = ERR_PTR(-EINVAL); | |
512 | goto error1; | |
513 | } | |
514 | port_priv = ib_get_mad_port(device, port_num); | |
515 | if (!port_priv) { | |
516 | ret = ERR_PTR(-ENODEV); | |
517 | goto error1; | |
518 | } | |
519 | /* Allocate structures */ | |
de6eb66b | 520 | mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL); |
1da177e4 LT |
521 | if (!mad_snoop_priv) { |
522 | ret = ERR_PTR(-ENOMEM); | |
523 | goto error1; | |
524 | } | |
525 | ||
526 | /* Now, fill in the various structures */ | |
1da177e4 LT |
527 | mad_snoop_priv->qp_info = &port_priv->qp_info[qpn]; |
528 | mad_snoop_priv->agent.device = device; | |
529 | mad_snoop_priv->agent.recv_handler = recv_handler; | |
530 | mad_snoop_priv->agent.snoop_handler = snoop_handler; | |
531 | mad_snoop_priv->agent.context = context; | |
532 | mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; | |
533 | mad_snoop_priv->agent.port_num = port_num; | |
534 | mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; | |
1b52fa98 | 535 | init_completion(&mad_snoop_priv->comp); |
1da177e4 LT |
536 | mad_snoop_priv->snoop_index = register_snoop_agent( |
537 | &port_priv->qp_info[qpn], | |
538 | mad_snoop_priv); | |
539 | if (mad_snoop_priv->snoop_index < 0) { | |
540 | ret = ERR_PTR(mad_snoop_priv->snoop_index); | |
541 | goto error2; | |
542 | } | |
543 | ||
544 | atomic_set(&mad_snoop_priv->refcount, 1); | |
545 | return &mad_snoop_priv->agent; | |
546 | ||
547 | error2: | |
548 | kfree(mad_snoop_priv); | |
549 | error1: | |
550 | return ret; | |
551 | } | |
552 | EXPORT_SYMBOL(ib_register_mad_snoop); | |
553 | ||
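A snoop agent attaches to an existing special QP and merely observes traffic; only IB_MAD_SNOOP_SEND_COMPLETIONS and IB_MAD_SNOOP_RECVS are honoured above (the RMPP variants are commented out). A hedged sketch of registering one; the handler signatures mirror how snoop_send() and snoop_recv() invoke them, and everything else is illustrative:

/* Illustrative only: observe MAD traffic on a port's GSI QP */
static void my_snoop_send(struct ib_mad_agent *agent,
			  struct ib_mad_send_buf *send_buf,
			  struct ib_mad_send_wc *send_wc)
{
	/* inspect the outgoing MAD; do not free or modify it */
}

static void my_snoop_recv(struct ib_mad_agent *agent,
			  struct ib_mad_recv_wc *recv_wc)
{
	/* inspect the incoming MAD; ownership stays with the real consumer */
}

static struct ib_mad_agent *my_snoop(struct ib_device *device, u8 port)
{
	return ib_register_mad_snoop(device, port, IB_QPT_GSI,
				     IB_MAD_SNOOP_SEND_COMPLETIONS |
				     IB_MAD_SNOOP_RECVS,
				     my_snoop_send, my_snoop_recv, NULL);
}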
1b52fa98 SH |
554 | static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv) |
555 | { | |
556 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | |
557 | complete(&mad_agent_priv->comp); | |
558 | } | |
559 | ||
560 | static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv) | |
561 | { | |
562 | if (atomic_dec_and_test(&mad_snoop_priv->refcount)) | |
563 | complete(&mad_snoop_priv->comp); | |
564 | } | |
565 | ||
1da177e4 LT |
566 | static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) |
567 | { | |
568 | struct ib_mad_port_private *port_priv; | |
569 | unsigned long flags; | |
570 | ||
571 | /* Note that we could still be handling received MADs */ | |
572 | ||
573 | /* | |
574 | * Canceling all sends results in dropping received response | |
575 | * MADs, preventing us from queuing additional work | |
576 | */ | |
577 | cancel_mads(mad_agent_priv); | |
1da177e4 | 578 | port_priv = mad_agent_priv->qp_info->port_priv; |
1da177e4 | 579 | cancel_delayed_work(&mad_agent_priv->timed_work); |
1da177e4 LT |
580 | |
581 | spin_lock_irqsave(&port_priv->reg_lock, flags); | |
582 | remove_mad_reg_req(mad_agent_priv); | |
583 | list_del(&mad_agent_priv->agent_list); | |
584 | spin_unlock_irqrestore(&port_priv->reg_lock, flags); | |
585 | ||
b82cab6b | 586 | flush_workqueue(port_priv->wq); |
fa619a77 | 587 | ib_cancel_rmpp_recvs(mad_agent_priv); |
1da177e4 | 588 | |
1b52fa98 SH |
589 | deref_mad_agent(mad_agent_priv); |
590 | wait_for_completion(&mad_agent_priv->comp); | |
1da177e4 | 591 | |
6044ec88 | 592 | kfree(mad_agent_priv->reg_req); |
b82cab6b | 593 | ib_dereg_mr(mad_agent_priv->agent.mr); |
1da177e4 LT |
594 | kfree(mad_agent_priv); |
595 | } | |
596 | ||
597 | static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv) | |
598 | { | |
599 | struct ib_mad_qp_info *qp_info; | |
600 | unsigned long flags; | |
601 | ||
602 | qp_info = mad_snoop_priv->qp_info; | |
603 | spin_lock_irqsave(&qp_info->snoop_lock, flags); | |
604 | qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL; | |
605 | atomic_dec(&qp_info->snoop_count); | |
606 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | |
607 | ||
1b52fa98 SH |
608 | deref_snoop_agent(mad_snoop_priv); |
609 | wait_for_completion(&mad_snoop_priv->comp); | |
1da177e4 LT |
610 | |
611 | kfree(mad_snoop_priv); | |
612 | } | |
613 | ||
614 | /* | |
615 | * ib_unregister_mad_agent - Unregisters a client from using MAD services | |
616 | */ | |
617 | int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent) | |
618 | { | |
619 | struct ib_mad_agent_private *mad_agent_priv; | |
620 | struct ib_mad_snoop_private *mad_snoop_priv; | |
621 | ||
622 | /* If the TID is zero, the agent can only snoop. */ | |
623 | if (mad_agent->hi_tid) { | |
624 | mad_agent_priv = container_of(mad_agent, | |
625 | struct ib_mad_agent_private, | |
626 | agent); | |
627 | unregister_mad_agent(mad_agent_priv); | |
628 | } else { | |
629 | mad_snoop_priv = container_of(mad_agent, | |
630 | struct ib_mad_snoop_private, | |
631 | agent); | |
632 | unregister_mad_snoop(mad_snoop_priv); | |
633 | } | |
634 | return 0; | |
635 | } | |
636 | EXPORT_SYMBOL(ib_unregister_mad_agent); | |
637 | ||
638 | static void dequeue_mad(struct ib_mad_list_head *mad_list) | |
639 | { | |
640 | struct ib_mad_queue *mad_queue; | |
641 | unsigned long flags; | |
642 | ||
643 | BUG_ON(!mad_list->mad_queue); | |
644 | mad_queue = mad_list->mad_queue; | |
645 | spin_lock_irqsave(&mad_queue->lock, flags); | |
646 | list_del(&mad_list->list); | |
647 | mad_queue->count--; | |
648 | spin_unlock_irqrestore(&mad_queue->lock, flags); | |
649 | } | |
650 | ||
651 | static void snoop_send(struct ib_mad_qp_info *qp_info, | |
34816ad9 | 652 | struct ib_mad_send_buf *send_buf, |
1da177e4 LT |
653 | struct ib_mad_send_wc *mad_send_wc, |
654 | int mad_snoop_flags) | |
655 | { | |
656 | struct ib_mad_snoop_private *mad_snoop_priv; | |
657 | unsigned long flags; | |
658 | int i; | |
659 | ||
660 | spin_lock_irqsave(&qp_info->snoop_lock, flags); | |
661 | for (i = 0; i < qp_info->snoop_table_size; i++) { | |
662 | mad_snoop_priv = qp_info->snoop_table[i]; | |
663 | if (!mad_snoop_priv || | |
664 | !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags)) | |
665 | continue; | |
666 | ||
667 | atomic_inc(&mad_snoop_priv->refcount); | |
668 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | |
669 | mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, | |
34816ad9 | 670 | send_buf, mad_send_wc); |
1b52fa98 | 671 | deref_snoop_agent(mad_snoop_priv); |
1da177e4 LT |
672 | spin_lock_irqsave(&qp_info->snoop_lock, flags); |
673 | } | |
674 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | |
675 | } | |
676 | ||
677 | static void snoop_recv(struct ib_mad_qp_info *qp_info, | |
678 | struct ib_mad_recv_wc *mad_recv_wc, | |
679 | int mad_snoop_flags) | |
680 | { | |
681 | struct ib_mad_snoop_private *mad_snoop_priv; | |
682 | unsigned long flags; | |
683 | int i; | |
684 | ||
685 | spin_lock_irqsave(&qp_info->snoop_lock, flags); | |
686 | for (i = 0; i < qp_info->snoop_table_size; i++) { | |
687 | mad_snoop_priv = qp_info->snoop_table[i]; | |
688 | if (!mad_snoop_priv || | |
689 | !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags)) | |
690 | continue; | |
691 | ||
692 | atomic_inc(&mad_snoop_priv->refcount); | |
693 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | |
694 | mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, | |
695 | mad_recv_wc); | |
1b52fa98 | 696 | deref_snoop_agent(mad_snoop_priv); |
1da177e4 LT |
697 | spin_lock_irqsave(&qp_info->snoop_lock, flags); |
698 | } | |
699 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | |
700 | } | |
701 | ||
062dbb69 MT |
702 | static void build_smp_wc(struct ib_qp *qp, |
703 | u64 wr_id, u16 slid, u16 pkey_index, u8 port_num, | |
1da177e4 LT |
704 | struct ib_wc *wc) |
705 | { | |
706 | memset(wc, 0, sizeof *wc); | |
707 | wc->wr_id = wr_id; | |
708 | wc->status = IB_WC_SUCCESS; | |
709 | wc->opcode = IB_WC_RECV; | |
710 | wc->pkey_index = pkey_index; | |
711 | wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh); | |
712 | wc->src_qp = IB_QP0; | |
062dbb69 | 713 | wc->qp = qp; |
1da177e4 LT |
714 | wc->slid = slid; |
715 | wc->sl = 0; | |
716 | wc->dlid_path_bits = 0; | |
717 | wc->port_num = port_num; | |
718 | } | |
719 | ||
720 | /* | |
721 | * Return 0 if SMP is to be sent | |
722 | * Return 1 if SMP was consumed locally (whether or not solicited) | |
723 | * Return < 0 if error | |
724 | */ | |
725 | static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | |
34816ad9 | 726 | struct ib_mad_send_wr_private *mad_send_wr) |
1da177e4 | 727 | { |
de493d47 | 728 | int ret = 0; |
34816ad9 | 729 | struct ib_smp *smp = mad_send_wr->send_buf.mad; |
1da177e4 LT |
730 | unsigned long flags; |
731 | struct ib_mad_local_private *local; | |
732 | struct ib_mad_private *mad_priv; | |
733 | struct ib_mad_port_private *port_priv; | |
734 | struct ib_mad_agent_private *recv_mad_agent = NULL; | |
735 | struct ib_device *device = mad_agent_priv->agent.device; | |
1bae4dbf | 736 | u8 port_num; |
1da177e4 | 737 | struct ib_wc mad_wc; |
34816ad9 | 738 | struct ib_send_wr *send_wr = &mad_send_wr->send_wr; |
1da177e4 | 739 | |
1bae4dbf HR |
740 | if (device->node_type == RDMA_NODE_IB_SWITCH && |
741 | smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | |
742 | port_num = send_wr->wr.ud.port_num; | |
743 | else | |
744 | port_num = mad_agent_priv->agent.port_num; | |
745 | ||
8cf3f04f RC |
746 | /* |
747 | * Directed route handling starts if the initial LID routed part of | |
748 | * a request or the ending LID routed part of a response is empty. | |
749 | * If we are at the start of the LID routed part, don't update the | |
750 | * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec. | |
751 | */ | |
752 | if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == | |
753 | IB_LID_PERMISSIVE && | |
de493d47 HR |
754 | smi_handle_dr_smp_send(smp, device->node_type, port_num) == |
755 | IB_SMI_DISCARD) { | |
1da177e4 | 756 | ret = -EINVAL; |
7ef5d4b0 | 757 | dev_err(&device->dev, "Invalid directed route\n"); |
1da177e4 LT |
758 | goto out; |
759 | } | |
de493d47 | 760 | |
1da177e4 | 761 | /* Check whether to post the send on the QP or process it locally */ |
727792da SW |
762 | if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD && |
763 | smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD) | |
1da177e4 LT |
764 | goto out; |
765 | ||
766 | local = kmalloc(sizeof *local, GFP_ATOMIC); | |
767 | if (!local) { | |
768 | ret = -ENOMEM; | |
7ef5d4b0 | 769 | dev_err(&device->dev, "No memory for ib_mad_local_private\n"); |
1da177e4 LT |
770 | goto out; |
771 | } | |
772 | local->mad_priv = NULL; | |
773 | local->recv_mad_agent = NULL; | |
774 | mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC); | |
775 | if (!mad_priv) { | |
776 | ret = -ENOMEM; | |
7ef5d4b0 | 777 | dev_err(&device->dev, "No memory for local response MAD\n"); |
1da177e4 LT |
778 | kfree(local); |
779 | goto out; | |
780 | } | |
781 | ||
062dbb69 MT |
782 | build_smp_wc(mad_agent_priv->agent.qp, |
783 | send_wr->wr_id, be16_to_cpu(smp->dr_slid), | |
97f52eb4 | 784 | send_wr->wr.ud.pkey_index, |
1da177e4 LT |
785 | send_wr->wr.ud.port_num, &mad_wc); |
786 | ||
787 | /* No GRH for DR SMP */ | |
788 | ret = device->process_mad(device, 0, port_num, &mad_wc, NULL, | |
789 | (struct ib_mad *)smp, | |
790 | (struct ib_mad *)&mad_priv->mad); | |
791 | switch (ret) | |
792 | { | |
793 | case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: | |
96909308 | 794 | if (ib_response_mad(&mad_priv->mad.mad.mad_hdr) && |
1da177e4 LT |
795 | mad_agent_priv->agent.recv_handler) { |
796 | local->mad_priv = mad_priv; | |
797 | local->recv_mad_agent = mad_agent_priv; | |
798 | /* | |
799 | * Reference MAD agent until receive | |
800 | * side of local completion handled | |
801 | */ | |
802 | atomic_inc(&mad_agent_priv->refcount); | |
803 | } else | |
804 | kmem_cache_free(ib_mad_cache, mad_priv); | |
805 | break; | |
806 | case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED: | |
807 | kmem_cache_free(ib_mad_cache, mad_priv); | |
4780c195 | 808 | break; |
1da177e4 LT |
809 | case IB_MAD_RESULT_SUCCESS: |
810 | /* Treat like an incoming receive MAD */ | |
1da177e4 LT |
811 | port_priv = ib_get_mad_port(mad_agent_priv->agent.device, |
812 | mad_agent_priv->agent.port_num); | |
813 | if (port_priv) { | |
727792da | 814 | memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad)); |
1da177e4 | 815 | recv_mad_agent = find_mad_agent(port_priv, |
4a0754fa | 816 | &mad_priv->mad.mad); |
1da177e4 LT |
817 | } |
818 | if (!port_priv || !recv_mad_agent) { | |
4780c195 RC |
819 | /* |
820 | * No receiving agent so drop packet and | |
821 | * generate send completion. | |
822 | */ | |
1da177e4 | 823 | kmem_cache_free(ib_mad_cache, mad_priv); |
4780c195 | 824 | break; |
1da177e4 LT |
825 | } |
826 | local->mad_priv = mad_priv; | |
827 | local->recv_mad_agent = recv_mad_agent; | |
828 | break; | |
829 | default: | |
830 | kmem_cache_free(ib_mad_cache, mad_priv); | |
831 | kfree(local); | |
832 | ret = -EINVAL; | |
833 | goto out; | |
834 | } | |
835 | ||
34816ad9 | 836 | local->mad_send_wr = mad_send_wr; |
1da177e4 LT |
837 | /* Reference MAD agent until send side of local completion handled */ |
838 | atomic_inc(&mad_agent_priv->refcount); | |
839 | /* Queue local completion to local list */ | |
840 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | |
841 | list_add_tail(&local->completion_list, &mad_agent_priv->local_list); | |
842 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
843 | queue_work(mad_agent_priv->qp_info->port_priv->wq, | |
b82cab6b | 844 | &mad_agent_priv->local_work); |
1da177e4 LT |
845 | ret = 1; |
846 | out: | |
847 | return ret; | |
848 | } | |
849 | ||
f36e1793 | 850 | static int get_pad_size(int hdr_len, int data_len) |
824c8ae7 HR |
851 | { |
852 | int seg_size, pad; | |
853 | ||
854 | seg_size = sizeof(struct ib_mad) - hdr_len; | |
855 | if (data_len && seg_size) { | |
856 | pad = seg_size - data_len % seg_size; | |
f36e1793 | 857 | return pad == seg_size ? 0 : pad; |
824c8ae7 | 858 | } else |
f36e1793 JM |
859 | return seg_size; |
860 | } | |
861 | ||
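To make the padding rule concrete, a worked example (assuming the fixed 256-byte MAD and the 56-byte IB_MGMT_SA_HDR from ib_mad.h): with hdr_len = 56 each RMPP segment carries seg_size = 256 - 56 = 200 payload bytes, so data_len = 360 gives pad = 200 - (360 % 200) = 40; the transfer then occupies two segments, and alloc_send_rmpp_list() below zeroes the final 40 bytes of the last segment.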
862 | static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr) | |
863 | { | |
864 | struct ib_rmpp_segment *s, *t; | |
865 | ||
866 | list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) { | |
867 | list_del(&s->list); | |
868 | kfree(s); | |
869 | } | |
870 | } | |
871 | ||
872 | static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, | |
873 | gfp_t gfp_mask) | |
874 | { | |
875 | struct ib_mad_send_buf *send_buf = &send_wr->send_buf; | |
876 | struct ib_rmpp_mad *rmpp_mad = send_buf->mad; | |
877 | struct ib_rmpp_segment *seg = NULL; | |
878 | int left, seg_size, pad; | |
879 | ||
880 | send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len; | |
881 | seg_size = send_buf->seg_size; | |
882 | pad = send_wr->pad; | |
883 | ||
884 | /* Allocate data segments. */ | |
885 | for (left = send_buf->data_len + pad; left > 0; left -= seg_size) { | |
886 | seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask); | |
887 | if (!seg) { | |
7ef5d4b0 IW |
888 | dev_err(&send_buf->mad_agent->device->dev, |
889 | "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n", | |
890 | sizeof (*seg) + seg_size, gfp_mask); | |
f36e1793 JM |
891 | free_send_rmpp_list(send_wr); |
892 | return -ENOMEM; | |
893 | } | |
894 | seg->num = ++send_buf->seg_count; | |
895 | list_add_tail(&seg->list, &send_wr->rmpp_list); | |
896 | } | |
897 | ||
898 | /* Zero any padding */ | |
899 | if (pad) | |
900 | memset(seg->data + seg_size - pad, 0, pad); | |
901 | ||
902 | rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv-> | |
903 | agent.rmpp_version; | |
904 | rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA; | |
905 | ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); | |
906 | ||
907 | send_wr->cur_seg = container_of(send_wr->rmpp_list.next, | |
908 | struct ib_rmpp_segment, list); | |
909 | send_wr->last_ack_seg = send_wr->cur_seg; | |
910 | return 0; | |
824c8ae7 HR |
911 | } |
912 | ||
f766c58f | 913 | int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent) |
1471cb6c IW |
914 | { |
915 | return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP); | |
916 | } | |
917 | EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent); | |
918 | ||
824c8ae7 HR |
919 | struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, |
920 | u32 remote_qpn, u16 pkey_index, | |
34816ad9 | 921 | int rmpp_active, |
824c8ae7 | 922 | int hdr_len, int data_len, |
dd0fc66f | 923 | gfp_t gfp_mask) |
824c8ae7 HR |
924 | { |
925 | struct ib_mad_agent_private *mad_agent_priv; | |
34816ad9 | 926 | struct ib_mad_send_wr_private *mad_send_wr; |
f36e1793 | 927 | int pad, message_size, ret, size; |
824c8ae7 HR |
928 | void *buf; |
929 | ||
34816ad9 SH |
930 | mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, |
931 | agent); | |
f36e1793 JM |
932 | pad = get_pad_size(hdr_len, data_len); |
933 | message_size = hdr_len + data_len + pad; | |
824c8ae7 | 934 | |
1471cb6c IW |
935 | if (ib_mad_kernel_rmpp_agent(mad_agent)) { |
936 | if (!rmpp_active && message_size > sizeof(struct ib_mad)) | |
937 | return ERR_PTR(-EINVAL); | |
938 | } else | |
939 | if (rmpp_active || message_size > sizeof(struct ib_mad)) | |
940 | return ERR_PTR(-EINVAL); | |
fa619a77 | 941 | |
f36e1793 JM |
942 | size = rmpp_active ? hdr_len : sizeof(struct ib_mad); |
943 | buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask); | |
824c8ae7 HR |
944 | if (!buf) |
945 | return ERR_PTR(-ENOMEM); | |
34816ad9 | 946 | |
f36e1793 JM |
947 | mad_send_wr = buf + size; |
948 | INIT_LIST_HEAD(&mad_send_wr->rmpp_list); | |
34816ad9 | 949 | mad_send_wr->send_buf.mad = buf; |
f36e1793 JM |
950 | mad_send_wr->send_buf.hdr_len = hdr_len; |
951 | mad_send_wr->send_buf.data_len = data_len; | |
952 | mad_send_wr->pad = pad; | |
34816ad9 SH |
953 | |
954 | mad_send_wr->mad_agent_priv = mad_agent_priv; | |
f36e1793 | 955 | mad_send_wr->sg_list[0].length = hdr_len; |
34816ad9 | 956 | mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey; |
f36e1793 JM |
957 | mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len; |
958 | mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey; | |
34816ad9 SH |
959 | |
960 | mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr; | |
961 | mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list; | |
f36e1793 | 962 | mad_send_wr->send_wr.num_sge = 2; |
34816ad9 SH |
963 | mad_send_wr->send_wr.opcode = IB_WR_SEND; |
964 | mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED; | |
965 | mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn; | |
966 | mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY; | |
967 | mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index; | |
fa619a77 HR |
968 | |
969 | if (rmpp_active) { | |
f36e1793 JM |
970 | ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask); |
971 | if (ret) { | |
972 | kfree(buf); | |
973 | return ERR_PTR(ret); | |
974 | } | |
fa619a77 HR |
975 | } |
976 | ||
34816ad9 | 977 | mad_send_wr->send_buf.mad_agent = mad_agent; |
824c8ae7 | 978 | atomic_inc(&mad_agent_priv->refcount); |
34816ad9 | 979 | return &mad_send_wr->send_buf; |
824c8ae7 HR |
980 | } |
981 | EXPORT_SYMBOL(ib_create_send_mad); | |
982 | ||
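A hedged sketch of allocating and filling a single, non-RMPP request buffer. The agent, address handle and TID are caller-supplied; IB_MGMT_BASE_VERSION, IB_MGMT_CLASS_PERF_MGMT and IB_MGMT_METHOD_GET come from ib_mad.h and serve only as an example class/method. The buffer is later handed to ib_post_send_mad() (see below) or, if never posted, released with ib_free_send_mad():

/* Illustrative only: build a one-MAD Get request */
static struct ib_mad_send_buf *my_build_get(struct ib_mad_agent *agent,
					    struct ib_ah *ah, u32 remote_qpn,
					    u16 pkey_index, u64 tid)
{
	struct ib_mad_send_buf *msg;
	struct ib_mad_hdr *hdr;

	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
				 IB_MGMT_MAD_HDR,
				 sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
				 GFP_KERNEL);
	if (IS_ERR(msg))
		return msg;

	hdr = msg->mad;				/* MAD header starts the buffer */
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
	hdr->class_version = 1;
	hdr->method        = IB_MGMT_METHOD_GET;
	hdr->tid           = cpu_to_be64(tid);

	msg->ah         = ah;			/* destination address handle */
	msg->timeout_ms = 100;			/* we expect a response */
	msg->retries    = 2;
	return msg;
}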
618a3c03 HR |
983 | int ib_get_mad_data_offset(u8 mgmt_class) |
984 | { | |
985 | if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) | |
986 | return IB_MGMT_SA_HDR; | |
987 | else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || | |
988 | (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || | |
989 | (mgmt_class == IB_MGMT_CLASS_BIS)) | |
990 | return IB_MGMT_DEVICE_HDR; | |
991 | else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && | |
992 | (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) | |
993 | return IB_MGMT_VENDOR_HDR; | |
994 | else | |
995 | return IB_MGMT_MAD_HDR; | |
996 | } | |
997 | EXPORT_SYMBOL(ib_get_mad_data_offset); | |
998 | ||
999 | int ib_is_mad_class_rmpp(u8 mgmt_class) | |
1000 | { | |
1001 | if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) || | |
1002 | (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || | |
1003 | (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || | |
1004 | (mgmt_class == IB_MGMT_CLASS_BIS) || | |
1005 | ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && | |
1006 | (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))) | |
1007 | return 1; | |
1008 | return 0; | |
1009 | } | |
1010 | EXPORT_SYMBOL(ib_is_mad_class_rmpp); | |
1011 | ||
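These two helpers are typically paired when sizing a send buffer: the class's data offset becomes hdr_len for ib_create_send_mad(), and RMPP capability, together with whether the message fits a single 256-byte MAD, is one reasonable way to choose rmpp_active. A hedged sketch; the policy in the comment is an assumption, not something this file mandates:

/* Illustrative only: derive ib_create_send_mad() arguments for a class */
static void size_mad(u8 mgmt_class, int data_len,
		     int *hdr_len, int *rmpp_active)
{
	*hdr_len = ib_get_mad_data_offset(mgmt_class);
	/* one reasonable policy: use RMPP only when the class supports it
	 * and the payload cannot fit into a single 256-byte MAD */
	*rmpp_active = ib_is_mad_class_rmpp(mgmt_class) &&
		       *hdr_len + data_len > (int) sizeof(struct ib_mad);
}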
f36e1793 JM |
1012 | void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num) |
1013 | { | |
1014 | struct ib_mad_send_wr_private *mad_send_wr; | |
1015 | struct list_head *list; | |
1016 | ||
1017 | mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, | |
1018 | send_buf); | |
1019 | list = &mad_send_wr->cur_seg->list; | |
1020 | ||
1021 | if (mad_send_wr->cur_seg->num < seg_num) { | |
1022 | list_for_each_entry(mad_send_wr->cur_seg, list, list) | |
1023 | if (mad_send_wr->cur_seg->num == seg_num) | |
1024 | break; | |
1025 | } else if (mad_send_wr->cur_seg->num > seg_num) { | |
1026 | list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list) | |
1027 | if (mad_send_wr->cur_seg->num == seg_num) | |
1028 | break; | |
1029 | } | |
1030 | return mad_send_wr->cur_seg->data; | |
1031 | } | |
1032 | EXPORT_SYMBOL(ib_get_rmpp_segment); | |
1033 | ||
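Because an RMPP send buffer keeps its payload in a list of per-segment allocations rather than one flat buffer, clients copy data in and out through this accessor; segments are numbered from 1 and each holds send_buf->seg_size bytes. A hedged sketch of scattering a large payload across the segments:

/* Illustrative only: copy an RMPP payload into its segments */
static void fill_rmpp_payload(struct ib_mad_send_buf *msg,
			      const void *src, int len)
{
	int i, copy;

	for (i = 1; i <= msg->seg_count && len > 0; i++) {
		copy = min(len, msg->seg_size);
		memcpy(ib_get_rmpp_segment(msg, i), src, copy);
		src += copy;
		len -= copy;
	}
}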
1034 | static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr) | |
1035 | { | |
1036 | if (mad_send_wr->send_buf.seg_count) | |
1037 | return ib_get_rmpp_segment(&mad_send_wr->send_buf, | |
1038 | mad_send_wr->seg_num); | |
1039 | else | |
1040 | return mad_send_wr->send_buf.mad + | |
1041 | mad_send_wr->send_buf.hdr_len; | |
1042 | } | |
1043 | ||
824c8ae7 HR |
1044 | void ib_free_send_mad(struct ib_mad_send_buf *send_buf) |
1045 | { | |
1046 | struct ib_mad_agent_private *mad_agent_priv; | |
f36e1793 | 1047 | struct ib_mad_send_wr_private *mad_send_wr; |
824c8ae7 HR |
1048 | |
1049 | mad_agent_priv = container_of(send_buf->mad_agent, | |
1050 | struct ib_mad_agent_private, agent); | |
f36e1793 JM |
1051 | mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, |
1052 | send_buf); | |
824c8ae7 | 1053 | |
f36e1793 JM |
1054 | free_send_rmpp_list(mad_send_wr); |
1055 | kfree(send_buf->mad); | |
1b52fa98 | 1056 | deref_mad_agent(mad_agent_priv); |
824c8ae7 HR |
1057 | } |
1058 | EXPORT_SYMBOL(ib_free_send_mad); | |
1059 | ||
fa619a77 | 1060 | int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) |
1da177e4 LT |
1061 | { |
1062 | struct ib_mad_qp_info *qp_info; | |
cabe3cbc | 1063 | struct list_head *list; |
34816ad9 SH |
1064 | struct ib_send_wr *bad_send_wr; |
1065 | struct ib_mad_agent *mad_agent; | |
1066 | struct ib_sge *sge; | |
1da177e4 LT |
1067 | unsigned long flags; |
1068 | int ret; | |
1069 | ||
f8197a4e | 1070 | /* Set WR ID to find mad_send_wr upon completion */ |
d760ce8f | 1071 | qp_info = mad_send_wr->mad_agent_priv->qp_info; |
1da177e4 LT |
1072 | mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list; |
1073 | mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; | |
1074 | ||
34816ad9 SH |
1075 | mad_agent = mad_send_wr->send_buf.mad_agent; |
1076 | sge = mad_send_wr->sg_list; | |
1527106f RC |
1077 | sge[0].addr = ib_dma_map_single(mad_agent->device, |
1078 | mad_send_wr->send_buf.mad, | |
1079 | sge[0].length, | |
1080 | DMA_TO_DEVICE); | |
2c34e68f YB |
1081 | if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr))) |
1082 | return -ENOMEM; | |
1083 | ||
1527106f RC |
1084 | mad_send_wr->header_mapping = sge[0].addr; |
1085 | ||
1086 | sge[1].addr = ib_dma_map_single(mad_agent->device, | |
1087 | ib_get_payload(mad_send_wr), | |
1088 | sge[1].length, | |
1089 | DMA_TO_DEVICE); | |
2c34e68f YB |
1090 | if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) { |
1091 | ib_dma_unmap_single(mad_agent->device, | |
1092 | mad_send_wr->header_mapping, | |
1093 | sge[0].length, DMA_TO_DEVICE); | |
1094 | return -ENOMEM; | |
1095 | } | |
1527106f | 1096 | mad_send_wr->payload_mapping = sge[1].addr; |
34816ad9 | 1097 | |
1da177e4 | 1098 | spin_lock_irqsave(&qp_info->send_queue.lock, flags); |
cabe3cbc | 1099 | if (qp_info->send_queue.count < qp_info->send_queue.max_active) { |
34816ad9 SH |
1100 | ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr, |
1101 | &bad_send_wr); | |
cabe3cbc | 1102 | list = &qp_info->send_queue.list; |
1da177e4 | 1103 | } else { |
1da177e4 | 1104 | ret = 0; |
cabe3cbc | 1105 | list = &qp_info->overflow_list; |
1da177e4 | 1106 | } |
cabe3cbc HR |
1107 | |
1108 | if (!ret) { | |
1109 | qp_info->send_queue.count++; | |
1110 | list_add_tail(&mad_send_wr->mad_list.list, list); | |
1111 | } | |
1112 | spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); | |
f36e1793 | 1113 | if (ret) { |
1527106f RC |
1114 | ib_dma_unmap_single(mad_agent->device, |
1115 | mad_send_wr->header_mapping, | |
1116 | sge[0].length, DMA_TO_DEVICE); | |
1117 | ib_dma_unmap_single(mad_agent->device, | |
1118 | mad_send_wr->payload_mapping, | |
1119 | sge[1].length, DMA_TO_DEVICE); | |
f36e1793 | 1120 | } |
1da177e4 LT |
1121 | return ret; |
1122 | } | |
1123 | ||
1124 | /* | |
1125 | * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated | |
1126 | * with the registered client | |
1127 | */ | |
34816ad9 SH |
1128 | int ib_post_send_mad(struct ib_mad_send_buf *send_buf, |
1129 | struct ib_mad_send_buf **bad_send_buf) | |
1da177e4 | 1130 | { |
1da177e4 | 1131 | struct ib_mad_agent_private *mad_agent_priv; |
34816ad9 SH |
1132 | struct ib_mad_send_buf *next_send_buf; |
1133 | struct ib_mad_send_wr_private *mad_send_wr; | |
1134 | unsigned long flags; | |
1135 | int ret = -EINVAL; | |
1da177e4 LT |
1136 | |
1137 | /* Walk list of send WRs and post each on send list */ | |
34816ad9 | 1138 | for (; send_buf; send_buf = next_send_buf) { |
1da177e4 | 1139 | |
34816ad9 SH |
1140 | mad_send_wr = container_of(send_buf, |
1141 | struct ib_mad_send_wr_private, | |
1142 | send_buf); | |
1143 | mad_agent_priv = mad_send_wr->mad_agent_priv; | |
1da177e4 | 1144 | |
34816ad9 SH |
1145 | if (!send_buf->mad_agent->send_handler || |
1146 | (send_buf->timeout_ms && | |
1147 | !send_buf->mad_agent->recv_handler)) { | |
1148 | ret = -EINVAL; | |
1149 | goto error; | |
1da177e4 LT |
1150 | } |
1151 | ||
618a3c03 HR |
1152 | if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) { |
1153 | if (mad_agent_priv->agent.rmpp_version) { | |
1154 | ret = -EINVAL; | |
1155 | goto error; | |
1156 | } | |
1157 | } | |
1158 | ||
1da177e4 LT |
1159 | /* |
1160 | * Save pointer to next work request to post in case the | |
1161 | * current one completes, and the user modifies the work | |
1162 | * request associated with the completion | |
1163 | */ | |
34816ad9 SH |
1164 | next_send_buf = send_buf->next; |
1165 | mad_send_wr->send_wr.wr.ud.ah = send_buf->ah; | |
1da177e4 | 1166 | |
34816ad9 SH |
1167 | if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == |
1168 | IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | |
1169 | ret = handle_outgoing_dr_smp(mad_agent_priv, | |
1170 | mad_send_wr); | |
1da177e4 | 1171 | if (ret < 0) /* error */ |
34816ad9 | 1172 | goto error; |
1da177e4 | 1173 | else if (ret == 1) /* locally consumed */ |
34816ad9 | 1174 | continue; |
1da177e4 LT |
1175 | } |
1176 | ||
34816ad9 | 1177 | mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; |
1da177e4 | 1178 | /* Timeout will be updated after send completes */ |
34816ad9 | 1179 | mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); |
4fc8cd49 SH |
1180 | mad_send_wr->max_retries = send_buf->retries; |
1181 | mad_send_wr->retries_left = send_buf->retries; | |
1182 | send_buf->retries = 0; | |
34816ad9 | 1183 | /* Reference for work request to QP + response */ |
1da177e4 LT |
1184 | mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); |
1185 | mad_send_wr->status = IB_WC_SUCCESS; | |
1186 | ||
1187 | /* Reference MAD agent until send completes */ | |
1188 | atomic_inc(&mad_agent_priv->refcount); | |
1189 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | |
1190 | list_add_tail(&mad_send_wr->agent_list, | |
1191 | &mad_agent_priv->send_list); | |
1192 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
1193 | ||
1471cb6c | 1194 | if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { |
fa619a77 HR |
1195 | ret = ib_send_rmpp_mad(mad_send_wr); |
1196 | if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED) | |
1197 | ret = ib_send_mad(mad_send_wr); | |
1198 | } else | |
1199 | ret = ib_send_mad(mad_send_wr); | |
1200 | if (ret < 0) { | |
1da177e4 LT |
1201 | /* Fail send request */ |
1202 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | |
1203 | list_del(&mad_send_wr->agent_list); | |
1204 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
1205 | atomic_dec(&mad_agent_priv->refcount); | |
34816ad9 | 1206 | goto error; |
1da177e4 | 1207 | } |
1da177e4 LT |
1208 | } |
1209 | return 0; | |
34816ad9 SH |
1210 | error: |
1211 | if (bad_send_buf) | |
1212 | *bad_send_buf = send_buf; | |
1da177e4 LT |
1213 | return ret; |
1214 | } | |
1215 | EXPORT_SYMBOL(ib_post_send_mad); | |
1216 | ||
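A hedged sketch of the posting side and its completion handler. After a successful ib_post_send_mad() the buffer belongs to the MAD layer until the agent's send handler runs; that handler, or the error path when posting fails, is therefore where ib_free_send_mad() belongs. IB_WC_RESP_TIMEOUT_ERR is the status used by timeout_sends() below for requests that never receive a response:

/* Illustrative only: post one MAD and release it on completion/failure */
static int my_post(struct ib_mad_send_buf *msg)
{
	struct ib_mad_send_buf *bad;	/* first unposted buffer of a chain */
	int ret;

	ret = ib_post_send_mad(msg, &bad);
	if (ret)
		ib_free_send_mad(msg);	/* never reached the send queue */
	return ret;
}

static void my_send_handler(struct ib_mad_agent *agent,
			    struct ib_mad_send_wc *send_wc)
{
	/* send_wc->status: IB_WC_SUCCESS, IB_WC_RESP_TIMEOUT_ERR, ... */
	ib_free_send_mad(send_wc->send_buf);
}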
1217 | /* | |
1218 | * ib_free_recv_mad - Returns the data buffers used to receive |
1219 | * a MAD back to the access layer |
1220 | */ | |
1221 | void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) | |
1222 | { | |
fa619a77 | 1223 | struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf; |
1da177e4 LT |
1224 | struct ib_mad_private_header *mad_priv_hdr; |
1225 | struct ib_mad_private *priv; | |
fa619a77 | 1226 | struct list_head free_list; |
1da177e4 | 1227 | |
fa619a77 HR |
1228 | INIT_LIST_HEAD(&free_list); |
1229 | list_splice_init(&mad_recv_wc->rmpp_list, &free_list); | |
1da177e4 | 1230 | |
fa619a77 HR |
1231 | list_for_each_entry_safe(mad_recv_buf, temp_recv_buf, |
1232 | &free_list, list) { | |
1233 | mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc, | |
1234 | recv_buf); | |
1da177e4 LT |
1235 | mad_priv_hdr = container_of(mad_recv_wc, |
1236 | struct ib_mad_private_header, | |
1237 | recv_wc); | |
1238 | priv = container_of(mad_priv_hdr, struct ib_mad_private, | |
1239 | header); | |
fa619a77 | 1240 | kmem_cache_free(ib_mad_cache, priv); |
1da177e4 | 1241 | } |
1da177e4 LT |
1242 | } |
1243 | EXPORT_SYMBOL(ib_free_recv_mad); | |
1244 | ||
1da177e4 LT |
1245 | struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, |
1246 | u8 rmpp_version, | |
1247 | ib_mad_send_handler send_handler, | |
1248 | ib_mad_recv_handler recv_handler, | |
1249 | void *context) | |
1250 | { | |
1251 | return ERR_PTR(-EINVAL); /* XXX: for now */ | |
1252 | } | |
1253 | EXPORT_SYMBOL(ib_redirect_mad_qp); | |
1254 | ||
1255 | int ib_process_mad_wc(struct ib_mad_agent *mad_agent, | |
1256 | struct ib_wc *wc) | |
1257 | { | |
7ef5d4b0 IW |
1258 | dev_err(&mad_agent->device->dev, |
1259 | "ib_process_mad_wc() not implemented yet\n"); | |
1da177e4 LT |
1260 | return 0; |
1261 | } | |
1262 | EXPORT_SYMBOL(ib_process_mad_wc); | |
1263 | ||
1264 | static int method_in_use(struct ib_mad_mgmt_method_table **method, | |
1265 | struct ib_mad_reg_req *mad_reg_req) | |
1266 | { | |
1267 | int i; | |
1268 | ||
19b629f5 | 1269 | for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) { |
1da177e4 | 1270 | if ((*method)->agent[i]) { |
7ef5d4b0 | 1271 | pr_err("Method %d already in use\n", i); |
1da177e4 LT |
1272 | return -EINVAL; |
1273 | } | |
1274 | } | |
1275 | return 0; | |
1276 | } | |
1277 | ||
1278 | static int allocate_method_table(struct ib_mad_mgmt_method_table **method) | |
1279 | { | |
1280 | /* Allocate management method table */ | |
de6eb66b | 1281 | *method = kzalloc(sizeof **method, GFP_ATOMIC); |
1da177e4 | 1282 | if (!*method) { |
7ef5d4b0 | 1283 | pr_err("No memory for ib_mad_mgmt_method_table\n"); |
1da177e4 LT |
1284 | return -ENOMEM; |
1285 | } | |
1da177e4 LT |
1286 | |
1287 | return 0; | |
1288 | } | |
1289 | ||
1290 | /* | |
1291 | * Check to see if there are any methods still in use | |
1292 | */ | |
1293 | static int check_method_table(struct ib_mad_mgmt_method_table *method) | |
1294 | { | |
1295 | int i; | |
1296 | ||
1297 | for (i = 0; i < IB_MGMT_MAX_METHODS; i++) | |
1298 | if (method->agent[i]) | |
1299 | return 1; | |
1300 | return 0; | |
1301 | } | |
1302 | ||
1303 | /* | |
1304 | * Check to see if there are any method tables for this class still in use | |
1305 | */ | |
1306 | static int check_class_table(struct ib_mad_mgmt_class_table *class) | |
1307 | { | |
1308 | int i; | |
1309 | ||
1310 | for (i = 0; i < MAX_MGMT_CLASS; i++) | |
1311 | if (class->method_table[i]) | |
1312 | return 1; | |
1313 | return 0; | |
1314 | } | |
1315 | ||
1316 | static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class) | |
1317 | { | |
1318 | int i; | |
1319 | ||
1320 | for (i = 0; i < MAX_MGMT_OUI; i++) | |
1321 | if (vendor_class->method_table[i]) | |
1322 | return 1; | |
1323 | return 0; | |
1324 | } | |
1325 | ||
1326 | static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, | |
1327 | char *oui) | |
1328 | { | |
1329 | int i; | |
1330 | ||
1331 | for (i = 0; i < MAX_MGMT_OUI; i++) | |
3cd96564 RD |
1332 | /* Is there a matching OUI for this vendor class? */ |
1333 | if (!memcmp(vendor_class->oui[i], oui, 3)) | |
1da177e4 LT |
1334 | return i; |
1335 | ||
1336 | return -1; | |
1337 | } | |
1338 | ||
1339 | static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor) | |
1340 | { | |
1341 | int i; | |
1342 | ||
1343 | for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++) | |
1344 | if (vendor->vendor_class[i]) | |
1345 | return 1; | |
1346 | ||
1347 | return 0; | |
1348 | } | |
1349 | ||
1350 | static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method, | |
1351 | struct ib_mad_agent_private *agent) | |
1352 | { | |
1353 | int i; | |
1354 | ||
1355 | /* Remove any methods for this mad agent */ | |
1356 | for (i = 0; i < IB_MGMT_MAX_METHODS; i++) { | |
1357 | if (method->agent[i] == agent) { | |
1358 | method->agent[i] = NULL; | |
1359 | } | |
1360 | } | |
1361 | } | |
1362 | ||
1363 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, | |
1364 | struct ib_mad_agent_private *agent_priv, | |
1365 | u8 mgmt_class) | |
1366 | { | |
1367 | struct ib_mad_port_private *port_priv; | |
1368 | struct ib_mad_mgmt_class_table **class; | |
1369 | struct ib_mad_mgmt_method_table **method; | |
1370 | int i, ret; | |
1371 | ||
1372 | port_priv = agent_priv->qp_info->port_priv; | |
1373 | class = &port_priv->version[mad_reg_req->mgmt_class_version].class; | |
1374 | if (!*class) { | |
1375 | /* Allocate management class table for "new" class version */ | |
de6eb66b | 1376 | *class = kzalloc(sizeof **class, GFP_ATOMIC); |
1da177e4 | 1377 | if (!*class) { |
7ef5d4b0 IW |
1378 | dev_err(&agent_priv->agent.device->dev, |
1379 | "No memory for ib_mad_mgmt_class_table\n"); | |
1da177e4 LT |
1380 | ret = -ENOMEM; |
1381 | goto error1; | |
1382 | } | |
de6eb66b | 1383 | |
1da177e4 LT |
1384 | /* Allocate method table for this management class */ |
1385 | method = &(*class)->method_table[mgmt_class]; | |
1386 | if ((ret = allocate_method_table(method))) | |
1387 | goto error2; | |
1388 | } else { | |
1389 | method = &(*class)->method_table[mgmt_class]; | |
1390 | if (!*method) { | |
1391 | /* Allocate method table for this management class */ | |
1392 | if ((ret = allocate_method_table(method))) | |
1393 | goto error1; | |
1394 | } | |
1395 | } | |
1396 | ||
1397 | /* Now, make sure methods are not already in use */ | |
1398 | if (method_in_use(method, mad_reg_req)) | |
1399 | goto error3; | |
1400 | ||
1401 | /* Finally, add in methods being registered */ | |
19b629f5 | 1402 | for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) |
1da177e4 | 1403 | (*method)->agent[i] = agent_priv; |
19b629f5 | 1404 | |
1da177e4 LT |
1405 | return 0; |
1406 | ||
1407 | error3: | |
1408 | /* Remove any methods for this mad agent */ | |
1409 | remove_methods_mad_agent(*method, agent_priv); | |
1410 | /* Now, check to see if there are any methods in use */ | |
1411 | if (!check_method_table(*method)) { | |
1412 | /* If not, release management method table */ | |
1413 | kfree(*method); | |
1414 | *method = NULL; | |
1415 | } | |
1416 | ret = -EINVAL; | |
1417 | goto error1; | |
1418 | error2: | |
1419 | kfree(*class); | |
1420 | *class = NULL; | |
1421 | error1: | |
1422 | return ret; | |
1423 | } | |
1424 | ||
1425 | static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, | |
1426 | struct ib_mad_agent_private *agent_priv) | |
1427 | { | |
1428 | struct ib_mad_port_private *port_priv; | |
1429 | struct ib_mad_mgmt_vendor_class_table **vendor_table; | |
1430 | struct ib_mad_mgmt_vendor_class_table *vendor = NULL; | |
1431 | struct ib_mad_mgmt_vendor_class *vendor_class = NULL; | |
1432 | struct ib_mad_mgmt_method_table **method; | |
1433 | int i, ret = -ENOMEM; | |
1434 | u8 vclass; | |
1435 | ||
1436 | /* "New" vendor (with OUI) class */ | |
1437 | vclass = vendor_class_index(mad_reg_req->mgmt_class); | |
1438 | port_priv = agent_priv->qp_info->port_priv; | |
1439 | vendor_table = &port_priv->version[ | |
1440 | mad_reg_req->mgmt_class_version].vendor; | |
1441 | if (!*vendor_table) { | |
1442 | /* Allocate mgmt vendor class table for "new" class version */ | |
de6eb66b | 1443 | vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); |
1da177e4 | 1444 | if (!vendor) { |
7ef5d4b0 IW |
1445 | dev_err(&agent_priv->agent.device->dev, |
1446 | "No memory for ib_mad_mgmt_vendor_class_table\n"); | |
1da177e4 LT |
1447 | goto error1; |
1448 | } | |
de6eb66b | 1449 | |
1da177e4 LT |
1450 | *vendor_table = vendor; |
1451 | } | |
1452 | if (!(*vendor_table)->vendor_class[vclass]) { | |
1453 | /* Allocate table for this management vendor class */ | |
de6eb66b | 1454 | vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); |
1da177e4 | 1455 | if (!vendor_class) { |
7ef5d4b0 IW |
1456 | dev_err(&agent_priv->agent.device->dev, |
1457 | "No memory for ib_mad_mgmt_vendor_class\n"); | |
1da177e4 LT |
1458 | goto error2; |
1459 | } | |
de6eb66b | 1460 | |
1da177e4 LT |
1461 | (*vendor_table)->vendor_class[vclass] = vendor_class; |
1462 | } | |
1463 | for (i = 0; i < MAX_MGMT_OUI; i++) { | |
1464 | /* Is there a matching OUI for this vendor class? */ | |
1465 | if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], | |
1466 | mad_reg_req->oui, 3)) { | |
1467 | method = &(*vendor_table)->vendor_class[ | |
1468 | vclass]->method_table[i]; | |
1469 | BUG_ON(!*method); | |
1470 | goto check_in_use; | |
1471 | } | |
1472 | } | |
1473 | for (i = 0; i < MAX_MGMT_OUI; i++) { | |
1474 | /* Is an OUI slot available? */ | |
1475 | if (!is_vendor_oui((*vendor_table)->vendor_class[ | |
1476 | vclass]->oui[i])) { | |
1477 | method = &(*vendor_table)->vendor_class[ | |
1478 | vclass]->method_table[i]; | |
1479 | BUG_ON(*method); | |
1480 | /* Allocate method table for this OUI */ | |
1481 | if ((ret = allocate_method_table(method))) | |
1482 | goto error3; | |
1483 | memcpy((*vendor_table)->vendor_class[vclass]->oui[i], | |
1484 | mad_reg_req->oui, 3); | |
1485 | goto check_in_use; | |
1486 | } | |
1487 | } | |
7ef5d4b0 | 1488 | dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n"); |
1da177e4 LT |
1489 | goto error3; |
1490 | ||
1491 | check_in_use: | |
1492 | /* Now, make sure methods are not already in use */ | |
1493 | if (method_in_use(method, mad_reg_req)) | |
1494 | goto error4; | |
1495 | ||
1496 | /* Finally, add in methods being registered */ | |
19b629f5 | 1497 | for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) |
1da177e4 | 1498 | (*method)->agent[i] = agent_priv; |
19b629f5 | 1499 | |
1da177e4 LT |
1500 | return 0; |
1501 | ||
1502 | error4: | |
1503 | /* Remove any methods for this mad agent */ | |
1504 | remove_methods_mad_agent(*method, agent_priv); | |
1505 | /* Now, check to see if there are any methods in use */ | |
1506 | if (!check_method_table(*method)) { | |
1507 | /* If not, release management method table */ | |
1508 | kfree(*method); | |
1509 | *method = NULL; | |
1510 | } | |
1511 | ret = -EINVAL; | |
1512 | error3: | |
1513 | if (vendor_class) { | |
1514 | (*vendor_table)->vendor_class[vclass] = NULL; | |
1515 | kfree(vendor_class); | |
1516 | } | |
1517 | error2: | |
1518 | if (vendor) { | |
1519 | *vendor_table = NULL; | |
1520 | kfree(vendor); | |
1521 | } | |
1522 | error1: | |
1523 | return ret; | |
1524 | } | |
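Vendor classes carrying an OUI get one method table per OUI; the loops above first look for a slot that already holds the requested OUI and only then claim a free one. A small illustrative sketch of that slot search, where MAX_OUI and the all-zero "free slot" test stand in for MAX_MGMT_OUI and !is_vendor_oui():

#include <string.h>

#define MAX_OUI 8			/* stand-in for MAX_MGMT_OUI */

/*
 * Return the slot index for @oui: an existing match wins, otherwise the
 * first free (all-zero) slot is claimed; -1 means every slot is taken,
 * mirroring the "All OUI slots in use" failure above.
 */
static int find_or_claim_oui(unsigned char table[MAX_OUI][3],
			     const unsigned char oui[3])
{
	int i;

	for (i = 0; i < MAX_OUI; i++)
		if (!memcmp(table[i], oui, 3))
			return i;

	for (i = 0; i < MAX_OUI; i++)
		if (!(table[i][0] | table[i][1] | table[i][2])) {
			memcpy(table[i], oui, 3);
			return i;
		}
	return -1;
}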
1525 | ||
1526 | static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) | |
1527 | { | |
1528 | struct ib_mad_port_private *port_priv; | |
1529 | struct ib_mad_mgmt_class_table *class; | |
1530 | struct ib_mad_mgmt_method_table *method; | |
1531 | struct ib_mad_mgmt_vendor_class_table *vendor; | |
1532 | struct ib_mad_mgmt_vendor_class *vendor_class; | |
1533 | int index; | |
1534 | u8 mgmt_class; | |
1535 | ||
1536 | /* | |
1537 | * Was a MAD registration request supplied | |
1538 | * with the original registration? | |
1539 | */ | |
1540 | if (!agent_priv->reg_req) { | |
1541 | goto out; | |
1542 | } | |
1543 | ||
1544 | port_priv = agent_priv->qp_info->port_priv; | |
1545 | mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); | |
1546 | class = port_priv->version[ | |
1547 | agent_priv->reg_req->mgmt_class_version].class; | |
1548 | if (!class) | |
1549 | goto vendor_check; | |
1550 | ||
1551 | method = class->method_table[mgmt_class]; | |
1552 | if (method) { | |
1553 | /* Remove any methods for this mad agent */ | |
1554 | remove_methods_mad_agent(method, agent_priv); | |
1555 | /* Now, check to see if there are any methods still in use */ | |
1556 | if (!check_method_table(method)) { | |
1557 | /* If not, release management method table */ | |
1558 | kfree(method); | |
1559 | class->method_table[mgmt_class] = NULL; | |
1560 | /* Any management classes left? */ | |
1561 | if (!check_class_table(class)) { | |
1562 | /* If not, release management class table */ | |
1563 | kfree(class); | |
1564 | port_priv->version[ | |
1565 | agent_priv->reg_req-> | |
1566 | mgmt_class_version].class = NULL; | |
1567 | } | |
1568 | } | |
1569 | } | |
1570 | ||
1571 | vendor_check: | |
1572 | if (!is_vendor_class(mgmt_class)) | |
1573 | goto out; | |
1574 | ||
1575 | /* normalize mgmt_class to vendor range 2 */ | |
1576 | mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class); | |
1577 | vendor = port_priv->version[ | |
1578 | agent_priv->reg_req->mgmt_class_version].vendor; | |
1579 | ||
1580 | if (!vendor) | |
1581 | goto out; | |
1582 | ||
1583 | vendor_class = vendor->vendor_class[mgmt_class]; | |
1584 | if (vendor_class) { | |
1585 | index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui); | |
1586 | if (index < 0) | |
1587 | goto out; | |
1588 | method = vendor_class->method_table[index]; | |
1589 | if (method) { | |
1590 | /* Remove any methods for this mad agent */ | |
1591 | remove_methods_mad_agent(method, agent_priv); | |
1592 | /* | |
1593 | * Now, check to see if there are | |
1594 | * any methods still in use | |
1595 | */ | |
1596 | if (!check_method_table(method)) { | |
1597 | /* If not, release management method table */ | |
1598 | kfree(method); | |
1599 | vendor_class->method_table[index] = NULL; | |
1600 | memset(vendor_class->oui[index], 0, 3); | |
1601 | /* Any OUIs left? */ | |
1602 | if (!check_vendor_class(vendor_class)) { | |
1603 | /* If not, release vendor class table */ | |
1604 | kfree(vendor_class); | |
1605 | vendor->vendor_class[mgmt_class] = NULL; | |
1606 | /* Any other vendor classes left? */ | |
1607 | if (!check_vendor_table(vendor)) { | |
1608 | kfree(vendor); | |
1609 | port_priv->version[ | |
1610 | agent_priv->reg_req-> | |
1611 | mgmt_class_version]. | |
1612 | vendor = NULL; | |
1613 | } | |
1614 | } | |
1615 | } | |
1616 | } | |
1617 | } | |
1618 | ||
1619 | out: | |
1620 | return; | |
1621 | } | |
1622 | ||
1da177e4 LT |
1623 | static struct ib_mad_agent_private * |
1624 | find_mad_agent(struct ib_mad_port_private *port_priv, | |
4a0754fa | 1625 | struct ib_mad *mad) |
1da177e4 LT |
1626 | { |
1627 | struct ib_mad_agent_private *mad_agent = NULL; | |
1628 | unsigned long flags; | |
1629 | ||
1630 | spin_lock_irqsave(&port_priv->reg_lock, flags); | |
96909308 | 1631 | if (ib_response_mad(&mad->mad_hdr)) { |
1da177e4 LT |
1632 | u32 hi_tid; |
1633 | struct ib_mad_agent_private *entry; | |
1634 | ||
1635 | /* | |
1636 | * Routing is based on the high 32 bits | |
1637 | * of the MAD's transaction ID. | |
1638 | */ | |
1639 | hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32; | |
34816ad9 | 1640 | list_for_each_entry(entry, &port_priv->agent_list, agent_list) { |
1da177e4 LT |
1641 | if (entry->agent.hi_tid == hi_tid) { |
1642 | mad_agent = entry; | |
1643 | break; | |
1644 | } | |
1645 | } | |
1646 | } else { | |
1647 | struct ib_mad_mgmt_class_table *class; | |
1648 | struct ib_mad_mgmt_method_table *method; | |
1649 | struct ib_mad_mgmt_vendor_class_table *vendor; | |
1650 | struct ib_mad_mgmt_vendor_class *vendor_class; | |
1651 | struct ib_vendor_mad *vendor_mad; | |
1652 | int index; | |
1653 | ||
1654 | /* | |
1655 | * Routing is based on version, class, and method | |
1656 | * For "newer" vendor MADs, also based on OUI | |
1657 | */ | |
1658 | if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION) | |
1659 | goto out; | |
1660 | if (!is_vendor_class(mad->mad_hdr.mgmt_class)) { | |
1661 | class = port_priv->version[ | |
1662 | mad->mad_hdr.class_version].class; | |
1663 | if (!class) | |
1664 | goto out; | |
b7ab0b19 HS |
1665 | if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >= |
1666 | IB_MGMT_MAX_METHODS) | |
1667 | goto out; | |
1da177e4 LT |
1668 | method = class->method_table[convert_mgmt_class( |
1669 | mad->mad_hdr.mgmt_class)]; | |
1670 | if (method) | |
1671 | mad_agent = method->agent[mad->mad_hdr.method & | |
1672 | ~IB_MGMT_METHOD_RESP]; | |
1673 | } else { | |
1674 | vendor = port_priv->version[ | |
1675 | mad->mad_hdr.class_version].vendor; | |
1676 | if (!vendor) | |
1677 | goto out; | |
1678 | vendor_class = vendor->vendor_class[vendor_class_index( | |
1679 | mad->mad_hdr.mgmt_class)]; | |
1680 | if (!vendor_class) | |
1681 | goto out; | |
1682 | /* Find matching OUI */ | |
1683 | vendor_mad = (struct ib_vendor_mad *)mad; | |
1684 | index = find_vendor_oui(vendor_class, vendor_mad->oui); | |
1685 | if (index == -1) | |
1686 | goto out; | |
1687 | method = vendor_class->method_table[index]; | |
1688 | if (method) { | |
1689 | mad_agent = method->agent[mad->mad_hdr.method & | |
1690 | ~IB_MGMT_METHOD_RESP]; | |
1691 | } | |
1692 | } | |
1693 | } | |
1694 | ||
1695 | if (mad_agent) { | |
1696 | if (mad_agent->agent.recv_handler) | |
1697 | atomic_inc(&mad_agent->refcount); | |
1698 | else { | |
7ef5d4b0 IW |
1699 | dev_notice(&port_priv->device->dev, |
1700 | "No receive handler for client %p on port %d\n", | |
1701 | &mad_agent->agent, port_priv->port_num); | |
1da177e4 LT |
1702 | mad_agent = NULL; |
1703 | } | |
1704 | } | |
1705 | out: | |
1706 | spin_unlock_irqrestore(&port_priv->reg_lock, flags); | |
1707 | ||
1708 | return mad_agent; | |
1709 | } | |
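For response MADs the lookup above ignores class and method entirely: the sending agent stamped the upper 32 bits of the 64-bit TID with its own hi_tid, so that half of the TID alone names the agent. A stand-alone sketch of that routing rule; the array-based agent list is illustrative, whereas the kernel walks port_priv->agent_list.

#include <stdint.h>
#include <stddef.h>

struct sample_agent {
	uint32_t hi_tid;	/* assigned per registered agent */
};

/* Route a response by the high 32 bits of its transaction ID. */
static struct sample_agent *route_response(struct sample_agent *agents,
					   size_t n, uint64_t tid)
{
	uint32_t hi_tid = (uint32_t)(tid >> 32);
	size_t i;

	for (i = 0; i < n; i++)
		if (agents[i].hi_tid == hi_tid)
			return &agents[i];
	return NULL;		/* no client is waiting for this response */
}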
1710 | ||
77f60833 | 1711 | static int validate_mad(const struct ib_mad_hdr *mad_hdr, u32 qp_num) |
1da177e4 LT |
1712 | { |
1713 | int valid = 0; | |
1714 | ||
1715 | /* Make sure MAD base version is understood */ | |
77f60833 | 1716 | if (mad_hdr->base_version != IB_MGMT_BASE_VERSION) { |
7ef5d4b0 | 1717 | pr_err("MAD received with unsupported base version %d\n", |
77f60833 | 1718 | mad_hdr->base_version); |
1da177e4 LT |
1719 | goto out; |
1720 | } | |
1721 | ||
1722 | /* Filter SMI packets sent to QPs other than QP0 */ | |
77f60833 IW |
1723 | if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || |
1724 | (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { | |
1da177e4 LT |
1725 | if (qp_num == 0) |
1726 | valid = 1; | |
1727 | } else { | |
1728 | /* Filter GSI packets sent to QP0 */ | |
1729 | if (qp_num != 0) | |
1730 | valid = 1; | |
1731 | } | |
1732 | ||
1733 | out: | |
1734 | return valid; | |
1735 | } | |
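The QP filter above boils down to "subnet-management classes only on QP0, everything else only on QP1". A compact predicate stating the same rule; the two class constants are written out here for illustration (0x01 and 0x81 are the subnet-management class codes defined by the IBA MAD spec, which the kernel macros expand to).

#include <stdbool.h>
#include <stdint.h>

#define SUBN_LID_ROUTED		0x01	/* SMP management class codes    */
#define SUBN_DIRECTED_ROUTE	0x81	/* (per the IBA MAD definitions) */

/* SMPs belong on QP0 only; every other class belongs on QP1. */
static bool mad_allowed_on_qp(uint8_t mgmt_class, uint32_t qp_num)
{
	bool is_smp = mgmt_class == SUBN_LID_ROUTED ||
		      mgmt_class == SUBN_DIRECTED_ROUTE;

	return is_smp ? qp_num == 0 : qp_num != 0;
}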
1736 | ||
f766c58f IW |
1737 | static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, |
1738 | const struct ib_mad_hdr *mad_hdr) | |
fa619a77 HR |
1739 | { |
1740 | struct ib_rmpp_mad *rmpp_mad; | |
1741 | ||
1742 | rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; | |
1743 | return !mad_agent_priv->agent.rmpp_version || | |
1471cb6c | 1744 | !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) || |
fa619a77 HR |
1745 | !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & |
1746 | IB_MGMT_RMPP_FLAG_ACTIVE) || | |
1747 | (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); | |
1748 | } | |
1749 | ||
8bf4b30c IW |
1750 | static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, |
1751 | const struct ib_mad_recv_wc *rwc) | |
fa9656bb | 1752 | { |
8bf4b30c | 1753 | return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == |
fa9656bb JM |
1754 | rwc->recv_buf.mad->mad_hdr.mgmt_class; |
1755 | } | |
1756 | ||
f766c58f IW |
1757 | static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, |
1758 | const struct ib_mad_send_wr_private *wr, | |
1759 | const struct ib_mad_recv_wc *rwc) | |
fa9656bb JM |
1760 | { |
1761 | struct ib_ah_attr attr; | |
1762 | u8 send_resp, rcv_resp; | |
9874e746 JM |
1763 | union ib_gid sgid; |
1764 | struct ib_device *device = mad_agent_priv->agent.device; | |
1765 | u8 port_num = mad_agent_priv->agent.port_num; | |
1766 | u8 lmc; | |
fa9656bb | 1767 | |
96909308 IW |
1768 | send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); |
1769 | rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); | |
fa9656bb | 1770 | |
fa9656bb JM |
1771 | if (send_resp == rcv_resp) |
1772 | /* both requests, or both responses. GIDs different */ | |
1773 | return 0; | |
1774 | ||
1775 | if (ib_query_ah(wr->send_buf.ah, &attr)) | |
1776 | /* Assume not equal, to avoid false positives. */ | |
1777 | return 0; | |
1778 | ||
9874e746 JM |
1779 | if (!!(attr.ah_flags & IB_AH_GRH) != |
1780 | !!(rwc->wc->wc_flags & IB_WC_GRH)) | |
fa9656bb JM |
1781 | /* one has GID, other does not. Assume different */ |
1782 | return 0; | |
9874e746 JM |
1783 | |
1784 | if (!send_resp && rcv_resp) { | |
1785 | /* sent a request, received a response */ | |
1786 | if (!(attr.ah_flags & IB_AH_GRH)) { | |
1787 | if (ib_get_cached_lmc(device, port_num, &lmc)) | |
1788 | return 0; | |
1789 | return (!lmc || !((attr.src_path_bits ^ | |
1790 | rwc->wc->dlid_path_bits) & | |
1791 | ((1 << lmc) - 1))); | |
1792 | } else { | |
1793 | if (ib_get_cached_gid(device, port_num, | |
1794 | attr.grh.sgid_index, &sgid)) | |
1795 | return 0; | |
1796 | return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw, | |
1797 | 16); | |
1798 | } | |
1799 | } | |
1800 | ||
1801 | if (!(attr.ah_flags & IB_AH_GRH)) | |
1802 | return attr.dlid == rwc->wc->slid; | |
1803 | else | |
1804 | return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw, | |
1805 | 16); | |
1806 | } | |
1807 | ||
1808 | static inline int is_direct(u8 class) | |
1809 | { | |
1810 | return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE); | |
fa9656bb | 1811 | } |
9874e746 | 1812 | |
fa619a77 | 1813 | struct ib_mad_send_wr_private* |
f766c58f IW |
1814 | ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, |
1815 | const struct ib_mad_recv_wc *wc) | |
1da177e4 | 1816 | { |
9874e746 | 1817 | struct ib_mad_send_wr_private *wr; |
fa9656bb JM |
1818 | struct ib_mad *mad; |
1819 | ||
9874e746 JM |
1820 | mad = (struct ib_mad *)wc->recv_buf.mad; |
1821 | ||
1822 | list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { | |
1823 | if ((wr->tid == mad->mad_hdr.tid) && | |
1824 | rcv_has_same_class(wr, wc) && | |
1825 | /* | |
1826 | * Don't check GID for direct routed MADs. | |
1827 | * These might have permissive LIDs. | |
1828 | */ | |
1829 | (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) || | |
1830 | rcv_has_same_gid(mad_agent_priv, wr, wc))) | |
39798695 | 1831 | return (wr->status == IB_WC_SUCCESS) ? wr : NULL; |
1da177e4 LT |
1832 | } |
1833 | ||
1834 | /* | |
1835 | * It's possible to receive the response before we've | |
1836 | * been notified that the send has completed | |
1837 | */ | |
9874e746 | 1838 | list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { |
c597eee5 | 1839 | if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && |
9874e746 JM |
1840 | wr->tid == mad->mad_hdr.tid && |
1841 | wr->timeout && | |
1842 | rcv_has_same_class(wr, wc) && | |
1843 | /* | |
1844 | * Don't check GID for direct routed MADs. | |
1845 | * These might have permissive LIDs. | |
1846 | */ | |
1847 | (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) || | |
1848 | rcv_has_same_gid(mad_agent_priv, wr, wc))) | |
1da177e4 | 1849 | /* Verify request has not been canceled */ |
9874e746 | 1850 | return (wr->status == IB_WC_SUCCESS) ? wr : NULL; |
1da177e4 LT |
1851 | } |
1852 | return NULL; | |
1853 | } | |
1854 | ||
fa619a77 | 1855 | void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) |
6a0c435e HR |
1856 | { |
1857 | mad_send_wr->timeout = 0; | |
179e0917 AM |
1858 | if (mad_send_wr->refcount == 1) |
1859 | list_move_tail(&mad_send_wr->agent_list, | |
6a0c435e | 1860 | &mad_send_wr->mad_agent_priv->done_list); |
6a0c435e HR |
1861 | } |
1862 | ||
1da177e4 | 1863 | static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, |
4a0754fa | 1864 | struct ib_mad_recv_wc *mad_recv_wc) |
1da177e4 LT |
1865 | { |
1866 | struct ib_mad_send_wr_private *mad_send_wr; | |
1867 | struct ib_mad_send_wc mad_send_wc; | |
1868 | unsigned long flags; | |
1869 | ||
fa619a77 HR |
1870 | INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); |
1871 | list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); | |
1471cb6c | 1872 | if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { |
fa619a77 HR |
1873 | mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, |
1874 | mad_recv_wc); | |
1875 | if (!mad_recv_wc) { | |
1b52fa98 | 1876 | deref_mad_agent(mad_agent_priv); |
fa619a77 HR |
1877 | return; |
1878 | } | |
1879 | } | |
1880 | ||
1da177e4 | 1881 | /* Complete corresponding request */ |
96909308 | 1882 | if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { |
1da177e4 | 1883 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
fa9656bb | 1884 | mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); |
1da177e4 LT |
1885 | if (!mad_send_wr) { |
1886 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
1471cb6c IW |
1887 | if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) |
1888 | && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) | |
1889 | && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) | |
1890 | & IB_MGMT_RMPP_FLAG_ACTIVE)) { | |
1891 | /* user rmpp is in effect | |
1892 | * and this is an active RMPP MAD | |
1893 | */ | |
1894 | mad_recv_wc->wc->wr_id = 0; | |
1895 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, | |
1896 | mad_recv_wc); | |
1897 | atomic_dec(&mad_agent_priv->refcount); | |
1898 | } else { | |
1899 | /* not user rmpp, revert to normal behavior and | |
1900 | * drop the mad */ | |
1901 | ib_free_recv_mad(mad_recv_wc); | |
1902 | deref_mad_agent(mad_agent_priv); | |
1903 | return; | |
1904 | } | |
1905 | } else { | |
1906 | ib_mark_mad_done(mad_send_wr); | |
1907 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
1da177e4 | 1908 | |
1471cb6c IW |
1909 | /* Defined behavior is to complete response before request */ |
1910 | mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf; | |
1911 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, | |
1912 | mad_recv_wc); | |
1913 | atomic_dec(&mad_agent_priv->refcount); | |
1da177e4 | 1914 | |
1471cb6c IW |
1915 | mad_send_wc.status = IB_WC_SUCCESS; |
1916 | mad_send_wc.vendor_err = 0; | |
1917 | mad_send_wc.send_buf = &mad_send_wr->send_buf; | |
1918 | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); | |
1919 | } | |
1da177e4 | 1920 | } else { |
4a0754fa HR |
1921 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, |
1922 | mad_recv_wc); | |
1b52fa98 | 1923 | deref_mad_agent(mad_agent_priv); |
1da177e4 LT |
1924 | } |
1925 | } | |
1926 | ||
0b307043 ST |
1927 | static bool generate_unmatched_resp(struct ib_mad_private *recv, |
1928 | struct ib_mad_private *response) | |
1929 | { | |
1930 | if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET || | |
1931 | recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) { | |
1932 | memcpy(response, recv, sizeof *response); | |
1933 | response->header.recv_wc.wc = &response->header.wc; | |
1934 | response->header.recv_wc.recv_buf.mad = &response->mad.mad; | |
1935 | response->header.recv_wc.recv_buf.grh = &response->grh; | |
1936 | response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; | |
1937 | response->mad.mad.mad_hdr.status = | |
1938 | cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); | |
840777de JM |
1939 | if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) |
1940 | response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION; | |
0b307043 ST |
1941 | |
1942 | return true; | |
1943 | } else { | |
1944 | return false; | |
1945 | } | |
1946 | } | |
1da177e4 LT |
1947 | static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, |
1948 | struct ib_wc *wc) | |
1949 | { | |
1950 | struct ib_mad_qp_info *qp_info; | |
1951 | struct ib_mad_private_header *mad_priv_hdr; | |
445d6807 | 1952 | struct ib_mad_private *recv, *response = NULL; |
1da177e4 LT |
1953 | struct ib_mad_list_head *mad_list; |
1954 | struct ib_mad_agent_private *mad_agent; | |
1bae4dbf | 1955 | int port_num; |
a9e74323 | 1956 | int ret = IB_MAD_RESULT_SUCCESS; |
1da177e4 | 1957 | |
1da177e4 LT |
1958 | mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; |
1959 | qp_info = mad_list->mad_queue->qp_info; | |
1960 | dequeue_mad(mad_list); | |
1961 | ||
1962 | mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, | |
1963 | mad_list); | |
1964 | recv = container_of(mad_priv_hdr, struct ib_mad_private, header); | |
1527106f RC |
1965 | ib_dma_unmap_single(port_priv->device, |
1966 | recv->header.mapping, | |
1967 | sizeof(struct ib_mad_private) - | |
1968 | sizeof(struct ib_mad_private_header), | |
1969 | DMA_FROM_DEVICE); | |
1da177e4 LT |
1970 | |
1971 | /* Set up the MAD receive work completion from the "normal" work completion */ | |
24239aff SH |
1972 | recv->header.wc = *wc; |
1973 | recv->header.recv_wc.wc = &recv->header.wc; | |
1da177e4 LT |
1974 | recv->header.recv_wc.mad_len = sizeof(struct ib_mad); |
1975 | recv->header.recv_wc.recv_buf.mad = &recv->mad.mad; | |
1976 | recv->header.recv_wc.recv_buf.grh = &recv->grh; | |
1977 | ||
1978 | if (atomic_read(&qp_info->snoop_count)) | |
1979 | snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); | |
1980 | ||
1981 | /* Validate MAD */ | |
77f60833 | 1982 | if (!validate_mad(&recv->mad.mad.mad_hdr, qp_info->qp->qp_num)) |
1da177e4 LT |
1983 | goto out; |
1984 | ||
445d6807 HR |
1985 | response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); |
1986 | if (!response) { | |
7ef5d4b0 IW |
1987 | dev_err(&port_priv->device->dev, |
1988 | "ib_mad_recv_done_handler no memory for response buffer\n"); | |
445d6807 HR |
1989 | goto out; |
1990 | } | |
1991 | ||
1bae4dbf HR |
1992 | if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) |
1993 | port_num = wc->port_num; | |
1994 | else | |
1995 | port_num = port_priv->port_num; | |
1996 | ||
1da177e4 LT |
1997 | if (recv->mad.mad.mad_hdr.mgmt_class == |
1998 | IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | |
1bae4dbf HR |
1999 | enum smi_forward_action retsmi; |
2000 | ||
de493d47 HR |
2001 | if (smi_handle_dr_smp_recv(&recv->mad.smp, |
2002 | port_priv->device->node_type, | |
1bae4dbf | 2003 | port_num, |
de493d47 HR |
2004 | port_priv->device->phys_port_cnt) == |
2005 | IB_SMI_DISCARD) | |
1da177e4 | 2006 | goto out; |
de493d47 | 2007 | |
1bae4dbf HR |
2008 | retsmi = smi_check_forward_dr_smp(&recv->mad.smp); |
2009 | if (retsmi == IB_SMI_LOCAL) | |
1da177e4 | 2010 | goto local; |
de493d47 | 2011 | |
1bae4dbf HR |
2012 | if (retsmi == IB_SMI_SEND) { /* don't forward */ |
2013 | if (smi_handle_dr_smp_send(&recv->mad.smp, | |
2014 | port_priv->device->node_type, | |
2015 | port_num) == IB_SMI_DISCARD) | |
2016 | goto out; | |
2017 | ||
2018 | if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD) | |
2019 | goto out; | |
2020 | } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { | |
2021 | /* forward case for switches */ | |
2022 | memcpy(response, recv, sizeof(*response)); | |
2023 | response->header.recv_wc.wc = &response->header.wc; | |
2024 | response->header.recv_wc.recv_buf.mad = &response->mad.mad; | |
2025 | response->header.recv_wc.recv_buf.grh = &response->grh; | |
2026 | ||
86dfbecd HR |
2027 | agent_send_response(&response->mad.mad, |
2028 | &response->grh, wc, | |
2029 | port_priv->device, | |
2030 | smi_get_fwd_port(&recv->mad.smp), | |
2031 | qp_info->qp->qp_num); | |
de493d47 | 2032 | |
1da177e4 | 2033 | goto out; |
1bae4dbf | 2034 | } |
1da177e4 LT |
2035 | } |
2036 | ||
2037 | local: | |
2038 | /* Give driver "right of first refusal" on incoming MAD */ | |
2039 | if (port_priv->device->process_mad) { | |
1da177e4 LT |
2040 | ret = port_priv->device->process_mad(port_priv->device, 0, |
2041 | port_priv->port_num, | |
2042 | wc, &recv->grh, | |
2043 | &recv->mad.mad, | |
2044 | &response->mad.mad); | |
2045 | if (ret & IB_MAD_RESULT_SUCCESS) { | |
2046 | if (ret & IB_MAD_RESULT_CONSUMED) | |
2047 | goto out; | |
2048 | if (ret & IB_MAD_RESULT_REPLY) { | |
34816ad9 SH |
2049 | agent_send_response(&response->mad.mad, |
2050 | &recv->grh, wc, | |
2051 | port_priv->device, | |
1bae4dbf | 2052 | port_num, |
34816ad9 | 2053 | qp_info->qp->qp_num); |
1da177e4 LT |
2054 | goto out; |
2055 | } | |
2056 | } | |
2057 | } | |
2058 | ||
4a0754fa | 2059 | mad_agent = find_mad_agent(port_priv, &recv->mad.mad); |
1da177e4 | 2060 | if (mad_agent) { |
4a0754fa | 2061 | ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); |
1da177e4 LT |
2062 | /* |
2063 | * recv is freed in the error paths of ib_mad_complete_recv() | |
2064 | * or by the recv_handler that it invokes | |
2065 | */ | |
2066 | recv = NULL; | |
a9e74323 JM |
2067 | } else if ((ret & IB_MAD_RESULT_SUCCESS) && |
2068 | generate_unmatched_resp(recv, response)) { | |
0b307043 ST |
2069 | agent_send_response(&response->mad.mad, &recv->grh, wc, |
2070 | port_priv->device, port_num, qp_info->qp->qp_num); | |
1da177e4 LT |
2071 | } |
2072 | ||
2073 | out: | |
2074 | /* Post another receive request for this QP */ | |
2075 | if (response) { | |
2076 | ib_mad_post_receive_mads(qp_info, response); | |
2077 | if (recv) | |
2078 | kmem_cache_free(ib_mad_cache, recv); | |
2079 | } else | |
2080 | ib_mad_post_receive_mads(qp_info, recv); | |
2081 | } | |
2082 | ||
2083 | static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) | |
2084 | { | |
2085 | struct ib_mad_send_wr_private *mad_send_wr; | |
2086 | unsigned long delay; | |
2087 | ||
2088 | if (list_empty(&mad_agent_priv->wait_list)) { | |
136b5721 | 2089 | cancel_delayed_work(&mad_agent_priv->timed_work); |
1da177e4 LT |
2090 | } else { |
2091 | mad_send_wr = list_entry(mad_agent_priv->wait_list.next, | |
2092 | struct ib_mad_send_wr_private, | |
2093 | agent_list); | |
2094 | ||
2095 | if (time_after(mad_agent_priv->timeout, | |
2096 | mad_send_wr->timeout)) { | |
2097 | mad_agent_priv->timeout = mad_send_wr->timeout; | |
1da177e4 LT |
2098 | delay = mad_send_wr->timeout - jiffies; |
2099 | if ((long)delay <= 0) | |
2100 | delay = 1; | |
e7c2f967 TH |
2101 | mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, |
2102 | &mad_agent_priv->timed_work, delay); | |
1da177e4 LT |
2103 | } |
2104 | } | |
2105 | } | |
2106 | ||
d760ce8f | 2107 | static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) |
1da177e4 | 2108 | { |
d760ce8f | 2109 | struct ib_mad_agent_private *mad_agent_priv; |
1da177e4 LT |
2110 | struct ib_mad_send_wr_private *temp_mad_send_wr; |
2111 | struct list_head *list_item; | |
2112 | unsigned long delay; | |
2113 | ||
d760ce8f | 2114 | mad_agent_priv = mad_send_wr->mad_agent_priv; |
1da177e4 LT |
2115 | list_del(&mad_send_wr->agent_list); |
2116 | ||
2117 | delay = mad_send_wr->timeout; | |
2118 | mad_send_wr->timeout += jiffies; | |
2119 | ||
29bb33dd HR |
2120 | if (delay) { |
2121 | list_for_each_prev(list_item, &mad_agent_priv->wait_list) { | |
2122 | temp_mad_send_wr = list_entry(list_item, | |
2123 | struct ib_mad_send_wr_private, | |
2124 | agent_list); | |
2125 | if (time_after(mad_send_wr->timeout, | |
2126 | temp_mad_send_wr->timeout)) | |
2127 | break; | |
2128 | } | |
1da177e4 | 2129 | } |
29bb33dd HR |
2130 | else |
2131 | list_item = &mad_agent_priv->wait_list; | |
1da177e4 LT |
2132 | list_add(&mad_send_wr->agent_list, list_item); |
2133 | ||
2134 | /* Reschedule a work item if we have a shorter timeout */ | |
e7c2f967 TH |
2135 | if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) |
2136 | mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, | |
2137 | &mad_agent_priv->timed_work, delay); | |
1da177e4 LT |
2138 | } |
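wait_for_response() keeps the wait list ordered by absolute expiry so that only the head entry ever needs a timer; since a newly armed timeout is typically the furthest out, it scans from the tail. A simplified forward-scanning sketch of the same sorted insert over a singly linked list (it ignores the jiffies wraparound that time_after() handles, and all names are local to the example):

struct waiter {
	unsigned long expires;		/* absolute expiry, like jiffies */
	struct waiter *next;
};

/* Insert @w so the list stays sorted by expiry (earliest first). */
static void enqueue_waiter(struct waiter **head, struct waiter *w)
{
	struct waiter **pos = head;

	while (*pos && (*pos)->expires <= w->expires)
		pos = &(*pos)->next;

	w->next = *pos;
	*pos = w;
}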
2139 | ||
03b61ad2 HR |
2140 | void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, |
2141 | int timeout_ms) | |
2142 | { | |
2143 | mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); | |
2144 | wait_for_response(mad_send_wr); | |
2145 | } | |
2146 | ||
1da177e4 LT |
2147 | /* |
2148 | * Process a send work completion | |
2149 | */ | |
fa619a77 HR |
2150 | void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, |
2151 | struct ib_mad_send_wc *mad_send_wc) | |
1da177e4 LT |
2152 | { |
2153 | struct ib_mad_agent_private *mad_agent_priv; | |
2154 | unsigned long flags; | |
fa619a77 | 2155 | int ret; |
1da177e4 | 2156 | |
d760ce8f | 2157 | mad_agent_priv = mad_send_wr->mad_agent_priv; |
1da177e4 | 2158 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
1471cb6c | 2159 | if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { |
fa619a77 HR |
2160 | ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); |
2161 | if (ret == IB_RMPP_RESULT_CONSUMED) | |
2162 | goto done; | |
2163 | } else | |
2164 | ret = IB_RMPP_RESULT_UNHANDLED; | |
2165 | ||
1da177e4 LT |
2166 | if (mad_send_wc->status != IB_WC_SUCCESS && |
2167 | mad_send_wr->status == IB_WC_SUCCESS) { | |
2168 | mad_send_wr->status = mad_send_wc->status; | |
2169 | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); | |
2170 | } | |
2171 | ||
2172 | if (--mad_send_wr->refcount > 0) { | |
2173 | if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && | |
2174 | mad_send_wr->status == IB_WC_SUCCESS) { | |
d760ce8f | 2175 | wait_for_response(mad_send_wr); |
1da177e4 | 2176 | } |
fa619a77 | 2177 | goto done; |
1da177e4 LT |
2178 | } |
2179 | ||
2180 | /* Remove send from MAD agent and notify client of completion */ | |
2181 | list_del(&mad_send_wr->agent_list); | |
2182 | adjust_timeout(mad_agent_priv); | |
2183 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
2184 | ||
2185 | if (mad_send_wr->status != IB_WC_SUCCESS) | |
2186 | mad_send_wc->status = mad_send_wr->status; | |
34816ad9 SH |
2187 | if (ret == IB_RMPP_RESULT_INTERNAL) |
2188 | ib_rmpp_send_handler(mad_send_wc); | |
2189 | else | |
fa619a77 HR |
2190 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
2191 | mad_send_wc); | |
1da177e4 LT |
2192 | |
2193 | /* Release reference on agent taken when sending */ | |
1b52fa98 | 2194 | deref_mad_agent(mad_agent_priv); |
fa619a77 HR |
2195 | return; |
2196 | done: | |
2197 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
1da177e4 LT |
2198 | } |
2199 | ||
2200 | static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, | |
2201 | struct ib_wc *wc) | |
2202 | { | |
2203 | struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; | |
2204 | struct ib_mad_list_head *mad_list; | |
2205 | struct ib_mad_qp_info *qp_info; | |
2206 | struct ib_mad_queue *send_queue; | |
2207 | struct ib_send_wr *bad_send_wr; | |
34816ad9 | 2208 | struct ib_mad_send_wc mad_send_wc; |
1da177e4 LT |
2209 | unsigned long flags; |
2210 | int ret; | |
2211 | ||
2212 | mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; | |
2213 | mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, | |
2214 | mad_list); | |
2215 | send_queue = mad_list->mad_queue; | |
2216 | qp_info = send_queue->qp_info; | |
2217 | ||
2218 | retry: | |
1527106f RC |
2219 | ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, |
2220 | mad_send_wr->header_mapping, | |
2221 | mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); | |
2222 | ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, | |
2223 | mad_send_wr->payload_mapping, | |
2224 | mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); | |
1da177e4 LT |
2225 | queued_send_wr = NULL; |
2226 | spin_lock_irqsave(&send_queue->lock, flags); | |
2227 | list_del(&mad_list->list); | |
2228 | ||
2229 | /* Move queued send to the send queue */ | |
2230 | if (send_queue->count-- > send_queue->max_active) { | |
2231 | mad_list = container_of(qp_info->overflow_list.next, | |
2232 | struct ib_mad_list_head, list); | |
2233 | queued_send_wr = container_of(mad_list, | |
2234 | struct ib_mad_send_wr_private, | |
2235 | mad_list); | |
179e0917 | 2236 | list_move_tail(&mad_list->list, &send_queue->list); |
1da177e4 LT |
2237 | } |
2238 | spin_unlock_irqrestore(&send_queue->lock, flags); | |
2239 | ||
34816ad9 SH |
2240 | mad_send_wc.send_buf = &mad_send_wr->send_buf; |
2241 | mad_send_wc.status = wc->status; | |
2242 | mad_send_wc.vendor_err = wc->vendor_err; | |
1da177e4 | 2243 | if (atomic_read(&qp_info->snoop_count)) |
34816ad9 | 2244 | snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, |
1da177e4 | 2245 | IB_MAD_SNOOP_SEND_COMPLETIONS); |
34816ad9 | 2246 | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); |
1da177e4 LT |
2247 | |
2248 | if (queued_send_wr) { | |
2249 | ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, | |
34816ad9 | 2250 | &bad_send_wr); |
1da177e4 | 2251 | if (ret) { |
7ef5d4b0 IW |
2252 | dev_err(&port_priv->device->dev, |
2253 | "ib_post_send failed: %d\n", ret); | |
1da177e4 LT |
2254 | mad_send_wr = queued_send_wr; |
2255 | wc->status = IB_WC_LOC_QP_OP_ERR; | |
2256 | goto retry; | |
2257 | } | |
2258 | } | |
2259 | } | |
2260 | ||
2261 | static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) | |
2262 | { | |
2263 | struct ib_mad_send_wr_private *mad_send_wr; | |
2264 | struct ib_mad_list_head *mad_list; | |
2265 | unsigned long flags; | |
2266 | ||
2267 | spin_lock_irqsave(&qp_info->send_queue.lock, flags); | |
2268 | list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { | |
2269 | mad_send_wr = container_of(mad_list, | |
2270 | struct ib_mad_send_wr_private, | |
2271 | mad_list); | |
2272 | mad_send_wr->retry = 1; | |
2273 | } | |
2274 | spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); | |
2275 | } | |
2276 | ||
2277 | static void mad_error_handler(struct ib_mad_port_private *port_priv, | |
2278 | struct ib_wc *wc) | |
2279 | { | |
2280 | struct ib_mad_list_head *mad_list; | |
2281 | struct ib_mad_qp_info *qp_info; | |
2282 | struct ib_mad_send_wr_private *mad_send_wr; | |
2283 | int ret; | |
2284 | ||
2285 | /* Determine if failure was a send or receive */ | |
2286 | mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; | |
2287 | qp_info = mad_list->mad_queue->qp_info; | |
2288 | if (mad_list->mad_queue == &qp_info->recv_queue) | |
2289 | /* | |
2290 | * Receive errors indicate that the QP has entered the error | |
2291 | * state - error handling/shutdown code will clean up | |
2292 | */ | |
2293 | return; | |
2294 | ||
2295 | /* | |
2296 | * Send errors will transition the QP to SQE - move | |
2297 | * QP to RTS and repost flushed work requests | |
2298 | */ | |
2299 | mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, | |
2300 | mad_list); | |
2301 | if (wc->status == IB_WC_WR_FLUSH_ERR) { | |
2302 | if (mad_send_wr->retry) { | |
2303 | /* Repost send */ | |
2304 | struct ib_send_wr *bad_send_wr; | |
2305 | ||
2306 | mad_send_wr->retry = 0; | |
2307 | ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr, | |
2308 | &bad_send_wr); | |
2309 | if (ret) | |
2310 | ib_mad_send_done_handler(port_priv, wc); | |
2311 | } else | |
2312 | ib_mad_send_done_handler(port_priv, wc); | |
2313 | } else { | |
2314 | struct ib_qp_attr *attr; | |
2315 | ||
2316 | /* Transition QP to RTS and fail offending send */ | |
2317 | attr = kmalloc(sizeof *attr, GFP_KERNEL); | |
2318 | if (attr) { | |
2319 | attr->qp_state = IB_QPS_RTS; | |
2320 | attr->cur_qp_state = IB_QPS_SQE; | |
2321 | ret = ib_modify_qp(qp_info->qp, attr, | |
2322 | IB_QP_STATE | IB_QP_CUR_STATE); | |
2323 | kfree(attr); | |
2324 | if (ret) | |
7ef5d4b0 IW |
2325 | dev_err(&port_priv->device->dev, |
2326 | "mad_error_handler - ib_modify_qp to RTS : %d\n", | |
2327 | ret); | |
1da177e4 LT |
2328 | else |
2329 | mark_sends_for_retry(qp_info); | |
2330 | } | |
2331 | ib_mad_send_done_handler(port_priv, wc); | |
2332 | } | |
2333 | } | |
2334 | ||
2335 | /* | |
2336 | * IB MAD completion callback | |
2337 | */ | |
c4028958 | 2338 | static void ib_mad_completion_handler(struct work_struct *work) |
1da177e4 LT |
2339 | { |
2340 | struct ib_mad_port_private *port_priv; | |
2341 | struct ib_wc wc; | |
2342 | ||
c4028958 | 2343 | port_priv = container_of(work, struct ib_mad_port_private, work); |
1da177e4 LT |
2344 | ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); |
2345 | ||
2346 | while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { | |
2347 | if (wc.status == IB_WC_SUCCESS) { | |
2348 | switch (wc.opcode) { | |
2349 | case IB_WC_SEND: | |
2350 | ib_mad_send_done_handler(port_priv, &wc); | |
2351 | break; | |
2352 | case IB_WC_RECV: | |
2353 | ib_mad_recv_done_handler(port_priv, &wc); | |
2354 | break; | |
2355 | default: | |
2356 | BUG_ON(1); | |
2357 | break; | |
2358 | } | |
2359 | } else | |
2360 | mad_error_handler(port_priv, &wc); | |
2361 | } | |
2362 | } | |
2363 | ||
2364 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) | |
2365 | { | |
2366 | unsigned long flags; | |
2367 | struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; | |
2368 | struct ib_mad_send_wc mad_send_wc; | |
2369 | struct list_head cancel_list; | |
2370 | ||
2371 | INIT_LIST_HEAD(&cancel_list); | |
2372 | ||
2373 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | |
2374 | list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, | |
2375 | &mad_agent_priv->send_list, agent_list) { | |
2376 | if (mad_send_wr->status == IB_WC_SUCCESS) { | |
3cd96564 | 2377 | mad_send_wr->status = IB_WC_WR_FLUSH_ERR; |
1da177e4 LT |
2378 | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); |
2379 | } | |
2380 | } | |
2381 | ||
2382 | /* Empty wait list to prevent receives from finding a request */ | |
2383 | list_splice_init(&mad_agent_priv->wait_list, &cancel_list); | |
2384 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
2385 | ||
2386 | /* Report all cancelled requests */ | |
2387 | mad_send_wc.status = IB_WC_WR_FLUSH_ERR; | |
2388 | mad_send_wc.vendor_err = 0; | |
2389 | ||
2390 | list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, | |
2391 | &cancel_list, agent_list) { | |
34816ad9 SH |
2392 | mad_send_wc.send_buf = &mad_send_wr->send_buf; |
2393 | list_del(&mad_send_wr->agent_list); | |
1da177e4 LT |
2394 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
2395 | &mad_send_wc); | |
1da177e4 LT |
2396 | atomic_dec(&mad_agent_priv->refcount); |
2397 | } | |
2398 | } | |
2399 | ||
2400 | static struct ib_mad_send_wr_private* | |
34816ad9 SH |
2401 | find_send_wr(struct ib_mad_agent_private *mad_agent_priv, |
2402 | struct ib_mad_send_buf *send_buf) | |
1da177e4 LT |
2403 | { |
2404 | struct ib_mad_send_wr_private *mad_send_wr; | |
2405 | ||
2406 | list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, | |
2407 | agent_list) { | |
34816ad9 | 2408 | if (&mad_send_wr->send_buf == send_buf) |
1da177e4 LT |
2409 | return mad_send_wr; |
2410 | } | |
2411 | ||
2412 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, | |
2413 | agent_list) { | |
c597eee5 IW |
2414 | if (is_rmpp_data_mad(mad_agent_priv, |
2415 | mad_send_wr->send_buf.mad) && | |
34816ad9 | 2416 | &mad_send_wr->send_buf == send_buf) |
1da177e4 LT |
2417 | return mad_send_wr; |
2418 | } | |
2419 | return NULL; | |
2420 | } | |
2421 | ||
34816ad9 SH |
2422 | int ib_modify_mad(struct ib_mad_agent *mad_agent, |
2423 | struct ib_mad_send_buf *send_buf, u32 timeout_ms) | |
1da177e4 LT |
2424 | { |
2425 | struct ib_mad_agent_private *mad_agent_priv; | |
2426 | struct ib_mad_send_wr_private *mad_send_wr; | |
2427 | unsigned long flags; | |
cabe3cbc | 2428 | int active; |
1da177e4 LT |
2429 | |
2430 | mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, | |
2431 | agent); | |
2432 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | |
34816ad9 | 2433 | mad_send_wr = find_send_wr(mad_agent_priv, send_buf); |
03b61ad2 | 2434 | if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { |
1da177e4 | 2435 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
03b61ad2 | 2436 | return -EINVAL; |
1da177e4 LT |
2437 | } |
2438 | ||
cabe3cbc | 2439 | active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); |
03b61ad2 | 2440 | if (!timeout_ms) { |
1da177e4 | 2441 | mad_send_wr->status = IB_WC_WR_FLUSH_ERR; |
03b61ad2 | 2442 | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); |
1da177e4 LT |
2443 | } |
2444 | ||
34816ad9 | 2445 | mad_send_wr->send_buf.timeout_ms = timeout_ms; |
cabe3cbc | 2446 | if (active) |
03b61ad2 HR |
2447 | mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); |
2448 | else | |
2449 | ib_reset_mad_timeout(mad_send_wr, timeout_ms); | |
2450 | ||
1da177e4 | 2451 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
03b61ad2 HR |
2452 | return 0; |
2453 | } | |
2454 | EXPORT_SYMBOL(ib_modify_mad); | |
1da177e4 | 2455 | |
34816ad9 SH |
2456 | void ib_cancel_mad(struct ib_mad_agent *mad_agent, |
2457 | struct ib_mad_send_buf *send_buf) | |
03b61ad2 | 2458 | { |
34816ad9 | 2459 | ib_modify_mad(mad_agent, send_buf, 0); |
1da177e4 LT |
2460 | } |
2461 | EXPORT_SYMBOL(ib_cancel_mad); | |
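Both exported entry points operate on the ib_mad_send_buf the client received when it posted the MAD: ib_modify_mad returns -EINVAL once the send is no longer pending, and cancelling (timeout of zero) still reports the flushed send back through the agent's send_handler with status IB_WC_WR_FLUSH_ERR. A hedged usage sketch from a hypothetical client module (which would include <rdma/ib_mad.h>) that holds a registered agent and an outstanding send:

/* Hypothetical client helpers built on the two exports above. */

/* Give an outstanding MAD five more seconds to complete. */
static int extend_mad_timeout(struct ib_mad_agent *agent,
			      struct ib_mad_send_buf *send_buf)
{
	return ib_modify_mad(agent, send_buf, 5000 /* ms */);
}

/*
 * Abort it instead; the completion still arrives via the agent's
 * send_handler with status IB_WC_WR_FLUSH_ERR.
 */
static void abort_mad(struct ib_mad_agent *agent,
		      struct ib_mad_send_buf *send_buf)
{
	ib_cancel_mad(agent, send_buf);
}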
2462 | ||
c4028958 | 2463 | static void local_completions(struct work_struct *work) |
1da177e4 LT |
2464 | { |
2465 | struct ib_mad_agent_private *mad_agent_priv; | |
2466 | struct ib_mad_local_private *local; | |
2467 | struct ib_mad_agent_private *recv_mad_agent; | |
2468 | unsigned long flags; | |
1d9bc6d6 | 2469 | int free_mad; |
1da177e4 LT |
2470 | struct ib_wc wc; |
2471 | struct ib_mad_send_wc mad_send_wc; | |
2472 | ||
c4028958 DH |
2473 | mad_agent_priv = |
2474 | container_of(work, struct ib_mad_agent_private, local_work); | |
1da177e4 LT |
2475 | |
2476 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | |
2477 | while (!list_empty(&mad_agent_priv->local_list)) { | |
2478 | local = list_entry(mad_agent_priv->local_list.next, | |
2479 | struct ib_mad_local_private, | |
2480 | completion_list); | |
37289efe | 2481 | list_del(&local->completion_list); |
1da177e4 | 2482 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
1d9bc6d6 | 2483 | free_mad = 0; |
1da177e4 LT |
2484 | if (local->mad_priv) { |
2485 | recv_mad_agent = local->recv_mad_agent; | |
2486 | if (!recv_mad_agent) { | |
7ef5d4b0 IW |
2487 | dev_err(&mad_agent_priv->agent.device->dev, |
2488 | "No receive MAD agent for local completion\n"); | |
1d9bc6d6 | 2489 | free_mad = 1; |
1da177e4 LT |
2490 | goto local_send_completion; |
2491 | } | |
2492 | ||
2493 | /* | |
2494 | * Defined behavior is to complete response | |
2495 | * before request | |
2496 | */ | |
062dbb69 MT |
2497 | build_smp_wc(recv_mad_agent->agent.qp, |
2498 | (unsigned long) local->mad_send_wr, | |
97f52eb4 | 2499 | be16_to_cpu(IB_LID_PERMISSIVE), |
34816ad9 | 2500 | 0, recv_mad_agent->agent.port_num, &wc); |
1da177e4 LT |
2501 | |
2502 | local->mad_priv->header.recv_wc.wc = &wc; | |
2503 | local->mad_priv->header.recv_wc.mad_len = | |
2504 | sizeof(struct ib_mad); | |
fa619a77 HR |
2505 | INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); |
2506 | list_add(&local->mad_priv->header.recv_wc.recv_buf.list, | |
2507 | &local->mad_priv->header.recv_wc.rmpp_list); | |
1da177e4 LT |
2508 | local->mad_priv->header.recv_wc.recv_buf.grh = NULL; |
2509 | local->mad_priv->header.recv_wc.recv_buf.mad = | |
2510 | &local->mad_priv->mad.mad; | |
2511 | if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) | |
2512 | snoop_recv(recv_mad_agent->qp_info, | |
2513 | &local->mad_priv->header.recv_wc, | |
2514 | IB_MAD_SNOOP_RECVS); | |
2515 | recv_mad_agent->agent.recv_handler( | |
2516 | &recv_mad_agent->agent, | |
2517 | &local->mad_priv->header.recv_wc); | |
2518 | spin_lock_irqsave(&recv_mad_agent->lock, flags); | |
2519 | atomic_dec(&recv_mad_agent->refcount); | |
2520 | spin_unlock_irqrestore(&recv_mad_agent->lock, flags); | |
2521 | } | |
2522 | ||
2523 | local_send_completion: | |
2524 | /* Complete send */ | |
2525 | mad_send_wc.status = IB_WC_SUCCESS; | |
2526 | mad_send_wc.vendor_err = 0; | |
34816ad9 | 2527 | mad_send_wc.send_buf = &local->mad_send_wr->send_buf; |
1da177e4 | 2528 | if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) |
34816ad9 SH |
2529 | snoop_send(mad_agent_priv->qp_info, |
2530 | &local->mad_send_wr->send_buf, | |
2531 | &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); | |
1da177e4 LT |
2532 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
2533 | &mad_send_wc); | |
2534 | ||
2535 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | |
1da177e4 | 2536 | atomic_dec(&mad_agent_priv->refcount); |
1d9bc6d6 | 2537 | if (free_mad) |
2c153b93 | 2538 | kmem_cache_free(ib_mad_cache, local->mad_priv); |
1da177e4 LT |
2539 | kfree(local); |
2540 | } | |
2541 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
2542 | } | |
2543 | ||
f75b7a52 HR |
2544 | static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) |
2545 | { | |
2546 | int ret; | |
2547 | ||
4fc8cd49 | 2548 | if (!mad_send_wr->retries_left) |
f75b7a52 HR |
2549 | return -ETIMEDOUT; |
2550 | ||
4fc8cd49 SH |
2551 | mad_send_wr->retries_left--; |
2552 | mad_send_wr->send_buf.retries++; | |
2553 | ||
34816ad9 | 2554 | mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); |
f75b7a52 | 2555 | |
1471cb6c | 2556 | if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { |
fa619a77 HR |
2557 | ret = ib_retry_rmpp(mad_send_wr); |
2558 | switch (ret) { | |
2559 | case IB_RMPP_RESULT_UNHANDLED: | |
2560 | ret = ib_send_mad(mad_send_wr); | |
2561 | break; | |
2562 | case IB_RMPP_RESULT_CONSUMED: | |
2563 | ret = 0; | |
2564 | break; | |
2565 | default: | |
2566 | ret = -ECOMM; | |
2567 | break; | |
2568 | } | |
2569 | } else | |
2570 | ret = ib_send_mad(mad_send_wr); | |
f75b7a52 HR |
2571 | |
2572 | if (!ret) { | |
2573 | mad_send_wr->refcount++; | |
f75b7a52 HR |
2574 | list_add_tail(&mad_send_wr->agent_list, |
2575 | &mad_send_wr->mad_agent_priv->send_list); | |
2576 | } | |
2577 | return ret; | |
2578 | } | |
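retry_send() charges one unit of the retry budget per attempt: retries_left comes from the client's original request, send_buf.retries reports back how many resends were consumed, and the timeout is restarted from timeout_ms on every attempt. A minimal sketch of just that bookkeeping, with names local to the example:

#include <stdbool.h>

struct pending_send {
	int retries_left;		/* remaining resend budget */
	int retries;			/* resends consumed, reported to client */
	unsigned long timeout;		/* restarted on every attempt */
};

/* Returns false once the budget is exhausted (caller reports a timeout). */
static bool charge_retry(struct pending_send *p, unsigned long timeout_ticks)
{
	if (!p->retries_left)
		return false;

	p->retries_left--;
	p->retries++;
	p->timeout = timeout_ticks;
	return true;
}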
2579 | ||
c4028958 | 2580 | static void timeout_sends(struct work_struct *work) |
1da177e4 LT |
2581 | { |
2582 | struct ib_mad_agent_private *mad_agent_priv; | |
2583 | struct ib_mad_send_wr_private *mad_send_wr; | |
2584 | struct ib_mad_send_wc mad_send_wc; | |
2585 | unsigned long flags, delay; | |
2586 | ||
c4028958 DH |
2587 | mad_agent_priv = container_of(work, struct ib_mad_agent_private, |
2588 | timed_work.work); | |
1da177e4 LT |
2589 | mad_send_wc.vendor_err = 0; |
2590 | ||
2591 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | |
2592 | while (!list_empty(&mad_agent_priv->wait_list)) { | |
2593 | mad_send_wr = list_entry(mad_agent_priv->wait_list.next, | |
2594 | struct ib_mad_send_wr_private, | |
2595 | agent_list); | |
2596 | ||
2597 | if (time_after(mad_send_wr->timeout, jiffies)) { | |
2598 | delay = mad_send_wr->timeout - jiffies; | |
2599 | if ((long)delay <= 0) | |
2600 | delay = 1; | |
2601 | queue_delayed_work(mad_agent_priv->qp_info-> | |
2602 | port_priv->wq, | |
2603 | &mad_agent_priv->timed_work, delay); | |
2604 | break; | |
2605 | } | |
2606 | ||
dbf9227b | 2607 | list_del(&mad_send_wr->agent_list); |
29bb33dd HR |
2608 | if (mad_send_wr->status == IB_WC_SUCCESS && |
2609 | !retry_send(mad_send_wr)) | |
f75b7a52 HR |
2610 | continue; |
2611 | ||
1da177e4 LT |
2612 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
2613 | ||
03b61ad2 HR |
2614 | if (mad_send_wr->status == IB_WC_SUCCESS) |
2615 | mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; | |
2616 | else | |
2617 | mad_send_wc.status = mad_send_wr->status; | |
34816ad9 | 2618 | mad_send_wc.send_buf = &mad_send_wr->send_buf; |
1da177e4 LT |
2619 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
2620 | &mad_send_wc); | |
2621 | ||
1da177e4 LT |
2622 | atomic_dec(&mad_agent_priv->refcount); |
2623 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | |
2624 | } | |
2625 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
2626 | } | |
2627 | ||
5dd2ce12 | 2628 | static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg) |
1da177e4 LT |
2629 | { |
2630 | struct ib_mad_port_private *port_priv = cq->cq_context; | |
dc05980d | 2631 | unsigned long flags; |
1da177e4 | 2632 | |
dc05980d MT |
2633 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); |
2634 | if (!list_empty(&port_priv->port_list)) | |
2635 | queue_work(port_priv->wq, &port_priv->work); | |
2636 | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | |
1da177e4 LT |
2637 | } |
2638 | ||
2639 | /* | |
2640 | * Allocate receive MADs and post receive WRs for them | |
2641 | */ | |
2642 | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, | |
2643 | struct ib_mad_private *mad) | |
2644 | { | |
2645 | unsigned long flags; | |
2646 | int post, ret; | |
2647 | struct ib_mad_private *mad_priv; | |
2648 | struct ib_sge sg_list; | |
2649 | struct ib_recv_wr recv_wr, *bad_recv_wr; | |
2650 | struct ib_mad_queue *recv_queue = &qp_info->recv_queue; | |
2651 | ||
2652 | /* Initialize common scatter list fields */ | |
2653 | sg_list.length = sizeof *mad_priv - sizeof mad_priv->header; | |
2654 | sg_list.lkey = (*qp_info->port_priv->mr).lkey; | |
2655 | ||
2656 | /* Initialize common receive WR fields */ | |
2657 | recv_wr.next = NULL; | |
2658 | recv_wr.sg_list = &sg_list; | |
2659 | recv_wr.num_sge = 1; | |
2660 | ||
2661 | do { | |
2662 | /* Allocate and map receive buffer */ | |
2663 | if (mad) { | |
2664 | mad_priv = mad; | |
2665 | mad = NULL; | |
2666 | } else { | |
2667 | mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); | |
2668 | if (!mad_priv) { | |
7ef5d4b0 IW |
2669 | dev_err(&qp_info->port_priv->device->dev, |
2670 | "No memory for receive buffer\n"); | |
1da177e4 LT |
2671 | ret = -ENOMEM; |
2672 | break; | |
2673 | } | |
2674 | } | |
1527106f RC |
2675 | sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, |
2676 | &mad_priv->grh, | |
2677 | sizeof *mad_priv - | |
2678 | sizeof mad_priv->header, | |
2679 | DMA_FROM_DEVICE); | |
2c34e68f YB |
2680 | if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, |
2681 | sg_list.addr))) { | |
2682 | ret = -ENOMEM; | |
2683 | break; | |
2684 | } | |
1527106f | 2685 | mad_priv->header.mapping = sg_list.addr; |
1da177e4 LT |
2686 | recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; |
2687 | mad_priv->header.mad_list.mad_queue = recv_queue; | |
2688 | ||
2689 | /* Post receive WR */ | |
2690 | spin_lock_irqsave(&recv_queue->lock, flags); | |
2691 | post = (++recv_queue->count < recv_queue->max_active); | |
2692 | list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); | |
2693 | spin_unlock_irqrestore(&recv_queue->lock, flags); | |
2694 | ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); | |
2695 | if (ret) { | |
2696 | spin_lock_irqsave(&recv_queue->lock, flags); | |
2697 | list_del(&mad_priv->header.mad_list.list); | |
2698 | recv_queue->count--; | |
2699 | spin_unlock_irqrestore(&recv_queue->lock, flags); | |
1527106f RC |
2700 | ib_dma_unmap_single(qp_info->port_priv->device, |
2701 | mad_priv->header.mapping, | |
2702 | sizeof *mad_priv - | |
2703 | sizeof mad_priv->header, | |
2704 | DMA_FROM_DEVICE); | |
1da177e4 | 2705 | kmem_cache_free(ib_mad_cache, mad_priv); |
7ef5d4b0 IW |
2706 | dev_err(&qp_info->port_priv->device->dev, |
2707 | "ib_post_recv failed: %d\n", ret); | |
1da177e4 LT |
2708 | break; |
2709 | } | |
2710 | } while (post); | |
2711 | ||
2712 | return ret; | |
2713 | } | |
2714 | ||
2715 | /* | |
2716 | * Return all the posted receive MADs | |
2717 | */ | |
2718 | static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) | |
2719 | { | |
2720 | struct ib_mad_private_header *mad_priv_hdr; | |
2721 | struct ib_mad_private *recv; | |
2722 | struct ib_mad_list_head *mad_list; | |
2723 | ||
fac70d51 EC |
2724 | if (!qp_info->qp) |
2725 | return; | |
2726 | ||
1da177e4 LT |
2727 | while (!list_empty(&qp_info->recv_queue.list)) { |
2728 | ||
2729 | mad_list = list_entry(qp_info->recv_queue.list.next, | |
2730 | struct ib_mad_list_head, list); | |
2731 | mad_priv_hdr = container_of(mad_list, | |
2732 | struct ib_mad_private_header, | |
2733 | mad_list); | |
2734 | recv = container_of(mad_priv_hdr, struct ib_mad_private, | |
2735 | header); | |
2736 | ||
2737 | /* Remove from posted receive MAD list */ | |
2738 | list_del(&mad_list->list); | |
2739 | ||
1527106f RC |
2740 | ib_dma_unmap_single(qp_info->port_priv->device, |
2741 | recv->header.mapping, | |
2742 | sizeof(struct ib_mad_private) - | |
2743 | sizeof(struct ib_mad_private_header), | |
2744 | DMA_FROM_DEVICE); | |
1da177e4 LT |
2745 | kmem_cache_free(ib_mad_cache, recv); |
2746 | } | |
2747 | ||
2748 | qp_info->recv_queue.count = 0; | |
2749 | } | |
2750 | ||
2751 | /* | |
2752 | * Start the port | |
2753 | */ | |
2754 | static int ib_mad_port_start(struct ib_mad_port_private *port_priv) | |
2755 | { | |
2756 | int ret, i; | |
2757 | struct ib_qp_attr *attr; | |
2758 | struct ib_qp *qp; | |
ef5ed416 | 2759 | u16 pkey_index; |
1da177e4 LT |
2760 | |
2761 | attr = kmalloc(sizeof *attr, GFP_KERNEL); | |
3cd96564 | 2762 | if (!attr) { |
7ef5d4b0 IW |
2763 | dev_err(&port_priv->device->dev, |
2764 | "Couldn't kmalloc ib_qp_attr\n"); | |
1da177e4 LT |
2765 | return -ENOMEM; |
2766 | } | |
2767 | ||
ef5ed416 JM |
2768 | ret = ib_find_pkey(port_priv->device, port_priv->port_num, |
2769 | IB_DEFAULT_PKEY_FULL, &pkey_index); | |
2770 | if (ret) | |
2771 | pkey_index = 0; | |
2772 | ||
1da177e4 LT |
2773 | for (i = 0; i < IB_MAD_QPS_CORE; i++) { |
2774 | qp = port_priv->qp_info[i].qp; | |
fac70d51 EC |
2775 | if (!qp) |
2776 | continue; | |
2777 | ||
1da177e4 LT |
2778 | /* |
2779 | * PKey index for QP1 is irrelevant but | |
2780 | * one is needed for the Reset to Init transition | |
2781 | */ | |
2782 | attr->qp_state = IB_QPS_INIT; | |
ef5ed416 | 2783 | attr->pkey_index = pkey_index; |
1da177e4 LT |
2784 | attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; |
2785 | ret = ib_modify_qp(qp, attr, IB_QP_STATE | | |
2786 | IB_QP_PKEY_INDEX | IB_QP_QKEY); | |
2787 | if (ret) { | |
7ef5d4b0 IW |
2788 | dev_err(&port_priv->device->dev, |
2789 | "Couldn't change QP%d state to INIT: %d\n", | |
2790 | i, ret); | |
1da177e4 LT |
2791 | goto out; |
2792 | } | |
2793 | ||
2794 | attr->qp_state = IB_QPS_RTR; | |
2795 | ret = ib_modify_qp(qp, attr, IB_QP_STATE); | |
2796 | if (ret) { | |
7ef5d4b0 IW |
2797 | dev_err(&port_priv->device->dev, |
2798 | "Couldn't change QP%d state to RTR: %d\n", | |
2799 | i, ret); | |
1da177e4 LT |
2800 | goto out; |
2801 | } | |
2802 | ||
2803 | attr->qp_state = IB_QPS_RTS; | |
2804 | attr->sq_psn = IB_MAD_SEND_Q_PSN; | |
2805 | ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); | |
2806 | if (ret) { | |
7ef5d4b0 IW |
2807 | dev_err(&port_priv->device->dev, |
2808 | "Couldn't change QP%d state to RTS: %d\n", | |
2809 | i, ret); | |
1da177e4 LT |
2810 | goto out; |
2811 | } | |
2812 | } | |
2813 | ||
2814 | ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); | |
2815 | if (ret) { | |
7ef5d4b0 IW |
2816 | dev_err(&port_priv->device->dev, |
2817 | "Failed to request completion notification: %d\n", | |
2818 | ret); | |
1da177e4 LT |
2819 | goto out; |
2820 | } | |
2821 | ||
2822 | for (i = 0; i < IB_MAD_QPS_CORE; i++) { | |
fac70d51 EC |
2823 | if (!port_priv->qp_info[i].qp) |
2824 | continue; | |
2825 | ||
1da177e4 LT |
2826 | ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); |
2827 | if (ret) { | |
7ef5d4b0 IW |
2828 | dev_err(&port_priv->device->dev, |
2829 | "Couldn't post receive WRs\n"); | |
1da177e4 LT |
2830 | goto out; |
2831 | } | |
2832 | } | |
2833 | out: | |
2834 | kfree(attr); | |
2835 | return ret; | |
2836 | } | |
2837 | ||
2838 | static void qp_event_handler(struct ib_event *event, void *qp_context) | |
2839 | { | |
2840 | struct ib_mad_qp_info *qp_info = qp_context; | |
2841 | ||
2842 | /* It's worse than that! He's dead, Jim! */ | |
7ef5d4b0 IW |
2843 | dev_err(&qp_info->port_priv->device->dev, |
2844 | "Fatal error (%d) on MAD QP (%d)\n", | |
1da177e4 LT |
2845 | event->event, qp_info->qp->qp_num); |
2846 | } | |
2847 | ||
2848 | static void init_mad_queue(struct ib_mad_qp_info *qp_info, | |
2849 | struct ib_mad_queue *mad_queue) | |
2850 | { | |
2851 | mad_queue->qp_info = qp_info; | |
2852 | mad_queue->count = 0; | |
2853 | spin_lock_init(&mad_queue->lock); | |
2854 | INIT_LIST_HEAD(&mad_queue->list); | |
2855 | } | |
2856 | ||
2857 | static void init_mad_qp(struct ib_mad_port_private *port_priv, | |
2858 | struct ib_mad_qp_info *qp_info) | |
2859 | { | |
2860 | qp_info->port_priv = port_priv; | |
2861 | init_mad_queue(qp_info, &qp_info->send_queue); | |
2862 | init_mad_queue(qp_info, &qp_info->recv_queue); | |
2863 | INIT_LIST_HEAD(&qp_info->overflow_list); | |
2864 | spin_lock_init(&qp_info->snoop_lock); | |
2865 | qp_info->snoop_table = NULL; | |
2866 | qp_info->snoop_table_size = 0; | |
2867 | atomic_set(&qp_info->snoop_count, 0); | |
2868 | } | |
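/*
 * Each MAD QP tracks its posted work requests in send_queue and
 * recv_queue, keeps sends that could not be posted (because the send
 * queue was already full) on overflow_list, and starts with an empty
 * snoop table.
 */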
2869 | ||
2870 | static int create_mad_qp(struct ib_mad_qp_info *qp_info, | |
2871 | enum ib_qp_type qp_type) | |
2872 | { | |
2873 | struct ib_qp_init_attr qp_init_attr; | |
2874 | int ret; | |
2875 | ||
2876 | memset(&qp_init_attr, 0, sizeof qp_init_attr); | |
2877 | qp_init_attr.send_cq = qp_info->port_priv->cq; | |
2878 | qp_init_attr.recv_cq = qp_info->port_priv->cq; | |
2879 | qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; | |
b76aabc3 HR |
2880 | qp_init_attr.cap.max_send_wr = mad_sendq_size; |
2881 | qp_init_attr.cap.max_recv_wr = mad_recvq_size; | |
1da177e4 LT |
2882 | qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; |
2883 | qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; | |
2884 | qp_init_attr.qp_type = qp_type; | |
2885 | qp_init_attr.port_num = qp_info->port_priv->port_num; | |
2886 | qp_init_attr.qp_context = qp_info; | |
2887 | qp_init_attr.event_handler = qp_event_handler; | |
2888 | qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); | |
2889 | if (IS_ERR(qp_info->qp)) { | |
7ef5d4b0 IW |
2890 | dev_err(&qp_info->port_priv->device->dev, |
2891 | "Couldn't create ib_mad QP%d\n", | |
2892 | get_spl_qp_index(qp_type)); | |
1da177e4 LT |
2893 | ret = PTR_ERR(qp_info->qp); |
2894 | goto error; | |
2895 | } | |
2896 | /* Use minimum queue sizes unless the CQ is resized */ | |
b76aabc3 HR |
2897 | qp_info->send_queue.max_active = mad_sendq_size; |
2898 | qp_info->recv_queue.max_active = mad_recvq_size; | |
1da177e4 LT |
2899 | return 0; |
2900 | ||
2901 | error: | |
2902 | return ret; | |
2903 | } | |
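/*
 * The MAD QPs are the port's special QPs: IB_QPT_SMI selects QP0
 * (subnet management interface) and IB_QPT_GSI selects QP1 (general
 * services interface).  Both share the port's single CQ and generate
 * a completion for every send (IB_SIGNAL_ALL_WR).
 */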
2904 | ||
2905 | static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) | |
2906 | { | |
fac70d51 EC |
2907 | if (!qp_info->qp) |
2908 | return; | |
2909 | ||
1da177e4 | 2910 | ib_destroy_qp(qp_info->qp); |
6044ec88 | 2911 | kfree(qp_info->snoop_table); |
1da177e4 LT |
2912 | } |
2913 | ||
2914 | /* | |
2915 | * Open the port | |
2916 | * Create the CQ, PD, MR, MAD QPs, and workqueue if needed |
2917 | */ | |
2918 | static int ib_mad_port_open(struct ib_device *device, | |
2919 | int port_num) | |
2920 | { | |
2921 | int ret, cq_size; | |
2922 | struct ib_mad_port_private *port_priv; | |
2923 | unsigned long flags; | |
2924 | char name[sizeof "ib_mad123"]; | |
fac70d51 | 2925 | int has_smi; |
1da177e4 | 2926 | |
1da177e4 | 2927 | /* Create new device info */ |
de6eb66b | 2928 | port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); |
1da177e4 | 2929 | if (!port_priv) { |
7ef5d4b0 | 2930 | dev_err(&device->dev, "No memory for ib_mad_port_private\n"); |
1da177e4 LT |
2931 | return -ENOMEM; |
2932 | } | |
de6eb66b | 2933 | |
1da177e4 LT |
2934 | port_priv->device = device; |
2935 | port_priv->port_num = port_num; | |
2936 | spin_lock_init(&port_priv->reg_lock); | |
2937 | INIT_LIST_HEAD(&port_priv->agent_list); | |
2938 | init_mad_qp(port_priv, &port_priv->qp_info[0]); | |
2939 | init_mad_qp(port_priv, &port_priv->qp_info[1]); | |
2940 | ||
fac70d51 | 2941 | cq_size = mad_sendq_size + mad_recvq_size; |
29541e3a | 2942 | has_smi = rdma_cap_ib_smi(device, port_num); |
fac70d51 EC |
2943 | if (has_smi) |
2944 | cq_size *= 2; | |
2945 | ||
1da177e4 | 2946 | port_priv->cq = ib_create_cq(port_priv->device, |
5dd2ce12 | 2947 | ib_mad_thread_completion_handler, |
f4fd0b22 | 2948 | NULL, port_priv, cq_size, 0); |
1da177e4 | 2949 | if (IS_ERR(port_priv->cq)) { |
7ef5d4b0 | 2950 | dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); |
1da177e4 LT |
2951 | ret = PTR_ERR(port_priv->cq); |
2952 | goto error3; | |
2953 | } | |
2954 | ||
2955 | port_priv->pd = ib_alloc_pd(device); | |
2956 | if (IS_ERR(port_priv->pd)) { | |
7ef5d4b0 | 2957 | dev_err(&device->dev, "Couldn't create ib_mad PD\n"); |
1da177e4 LT |
2958 | ret = PTR_ERR(port_priv->pd); |
2959 | goto error4; | |
2960 | } | |
2961 | ||
2962 | port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE); | |
2963 | if (IS_ERR(port_priv->mr)) { | |
7ef5d4b0 | 2964 | dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n"); |
1da177e4 LT |
2965 | ret = PTR_ERR(port_priv->mr); |
2966 | goto error5; | |
2967 | } | |
2968 | ||
fac70d51 EC |
2969 | if (has_smi) { |
2970 | ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); | |
2971 | if (ret) | |
2972 | goto error6; | |
2973 | } | |
1da177e4 LT |
2974 | ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); |
2975 | if (ret) | |
2976 | goto error7; | |
2977 | ||
2978 | snprintf(name, sizeof name, "ib_mad%d", port_num); | |
2979 | port_priv->wq = create_singlethread_workqueue(name); | |
2980 | if (!port_priv->wq) { | |
2981 | ret = -ENOMEM; | |
2982 | goto error8; | |
2983 | } | |
c4028958 | 2984 | INIT_WORK(&port_priv->work, ib_mad_completion_handler); |
1da177e4 | 2985 | |
dc05980d MT |
2986 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); |
2987 | list_add_tail(&port_priv->port_list, &ib_mad_port_list); | |
2988 | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | |
2989 | ||
1da177e4 LT |
2990 | ret = ib_mad_port_start(port_priv); |
2991 | if (ret) { | |
7ef5d4b0 | 2992 | dev_err(&device->dev, "Couldn't start port\n"); |
1da177e4 LT |
2993 | goto error9; |
2994 | } | |
2995 | ||
1da177e4 LT |
2996 | return 0; |
2997 | ||
2998 | error9: | |
dc05980d MT |
2999 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); |
3000 | list_del_init(&port_priv->port_list); | |
3001 | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | |
3002 | ||
1da177e4 LT |
3003 | destroy_workqueue(port_priv->wq); |
3004 | error8: | |
3005 | destroy_mad_qp(&port_priv->qp_info[1]); | |
3006 | error7: | |
3007 | destroy_mad_qp(&port_priv->qp_info[0]); | |
3008 | error6: | |
3009 | ib_dereg_mr(port_priv->mr); | |
3010 | error5: | |
3011 | ib_dealloc_pd(port_priv->pd); | |
3012 | error4: | |
3013 | ib_destroy_cq(port_priv->cq); | |
3014 | cleanup_recv_queue(&port_priv->qp_info[1]); | |
3015 | cleanup_recv_queue(&port_priv->qp_info[0]); | |
3016 | error3: | |
3017 | kfree(port_priv); | |
3018 | ||
3019 | return ret; | |
3020 | } | |
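/*
 * The error labels above unwind in reverse order of setup.  Note that
 * destroy_mad_qp() tolerates a QP that was never created (for example
 * QP0 on a port without SMI support), so the shared unwind path is
 * safe in both cases.
 */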
3021 | ||
3022 | /* | |
3023 | * Close the port | |
3024 | * Free the port's resources (workqueue, QPs, MR, PD, CQ) and |
3025 | * remove the port's info structure |
3026 | */ | |
3027 | static int ib_mad_port_close(struct ib_device *device, int port_num) | |
3028 | { | |
3029 | struct ib_mad_port_private *port_priv; | |
3030 | unsigned long flags; | |
3031 | ||
3032 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); | |
3033 | port_priv = __ib_get_mad_port(device, port_num); | |
3034 | if (port_priv == NULL) { | |
3035 | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | |
7ef5d4b0 | 3036 | dev_err(&device->dev, "Port %d not found\n", port_num); |
1da177e4 LT |
3037 | return -ENODEV; |
3038 | } | |
dc05980d | 3039 | list_del_init(&port_priv->port_list); |
1da177e4 LT |
3040 | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); |
3041 | ||
1da177e4 LT |
3042 | destroy_workqueue(port_priv->wq); |
3043 | destroy_mad_qp(&port_priv->qp_info[1]); | |
3044 | destroy_mad_qp(&port_priv->qp_info[0]); | |
3045 | ib_dereg_mr(port_priv->mr); | |
3046 | ib_dealloc_pd(port_priv->pd); | |
3047 | ib_destroy_cq(port_priv->cq); | |
3048 | cleanup_recv_queue(&port_priv->qp_info[1]); | |
3049 | cleanup_recv_queue(&port_priv->qp_info[0]); | |
3050 | /* XXX: Handle deallocation of MAD registration tables */ | |
3051 | ||
3052 | kfree(port_priv); | |
3053 | ||
3054 | return 0; | |
3055 | } | |
3056 | ||
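/*
 * Per-device add callback: open the MAD port and the agent port on
 * every port that supports MADs.  Switches expose management traffic
 * on port 0 only; CAs and routers use ports 1 through phys_port_cnt.
 * On failure, any ports opened so far are closed again.
 */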
3057 | static void ib_mad_init_device(struct ib_device *device) | |
3058 | { | |
4ab6fb7e | 3059 | int start, end, i; |
1da177e4 | 3060 | |
07ebafba | 3061 | if (device->node_type == RDMA_NODE_IB_SWITCH) { |
4ab6fb7e RD |
3062 | start = 0; |
3063 | end = 0; | |
1da177e4 | 3064 | } else { |
4ab6fb7e RD |
3065 | start = 1; |
3066 | end = device->phys_port_cnt; | |
1da177e4 | 3067 | } |
4ab6fb7e RD |
3068 | |
3069 | for (i = start; i <= end; i++) { | |
c757dea8 | 3070 | if (!rdma_cap_ib_mad(device, i)) |
827f2a8b MW |
3071 | continue; |
3072 | ||
4ab6fb7e | 3073 | if (ib_mad_port_open(device, i)) { |
7ef5d4b0 | 3074 | dev_err(&device->dev, "Couldn't open port %d\n", i); |
4ab6fb7e | 3075 | goto error; |
1da177e4 | 3076 | } |
4ab6fb7e | 3077 | if (ib_agent_port_open(device, i)) { |
7ef5d4b0 IW |
3078 | dev_err(&device->dev, |
3079 | "Couldn't open port %d for agents\n", i); | |
4ab6fb7e | 3080 | goto error_agent; |
1da177e4 LT |
3081 | } |
3082 | } | |
f68bcc2d | 3083 | return; |
1da177e4 | 3084 | |
4ab6fb7e RD |
3085 | error_agent: |
3086 | if (ib_mad_port_close(device, i)) | |
7ef5d4b0 | 3087 | dev_err(&device->dev, "Couldn't close port %d\n", i); |
4ab6fb7e RD |
3088 | |
3089 | error: | |
827f2a8b | 3090 | while (--i >= start) { |
c757dea8 | 3091 | if (!rdma_cap_ib_mad(device, i)) |
827f2a8b | 3092 | continue; |
4ab6fb7e | 3093 | |
4ab6fb7e | 3094 | if (ib_agent_port_close(device, i)) |
7ef5d4b0 IW |
3095 | dev_err(&device->dev, |
3096 | "Couldn't close port %d for agents\n", i); | |
4ab6fb7e | 3097 | if (ib_mad_port_close(device, i)) |
7ef5d4b0 | 3098 | dev_err(&device->dev, "Couldn't close port %d\n", i); |
1da177e4 | 3099 | } |
1da177e4 LT |
3100 | } |
3101 | ||
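/*
 * Per-device remove callback: walk the same port range as
 * ib_mad_init_device() and close the agent and MAD port state for
 * every MAD-capable port.
 */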
3102 | static void ib_mad_remove_device(struct ib_device *device) | |
3103 | { | |
827f2a8b | 3104 | int start, end, i; |
070e140c | 3105 | |
07ebafba | 3106 | if (device->node_type == RDMA_NODE_IB_SWITCH) { |
827f2a8b MW |
3107 | start = 0; |
3108 | end = 0; | |
1da177e4 | 3109 | } else { |
827f2a8b MW |
3110 | start = 1; |
3111 | end = device->phys_port_cnt; | |
1da177e4 | 3112 | } |
827f2a8b MW |
3113 | |
3114 | for (i = start; i <= end; i++) { | |
c757dea8 | 3115 | if (!rdma_cap_ib_mad(device, i)) |
827f2a8b MW |
3116 | continue; |
3117 | ||
3118 | if (ib_agent_port_close(device, i)) | |
7ef5d4b0 | 3119 | dev_err(&device->dev, |
827f2a8b MW |
3120 | "Couldn't close port %d for agents\n", i); |
3121 | if (ib_mad_port_close(device, i)) | |
3122 | dev_err(&device->dev, "Couldn't close port %d\n", i); | |
1da177e4 LT |
3123 | } |
3124 | } | |
3125 | ||
3126 | static struct ib_client mad_client = { | |
3127 | .name = "mad", | |
3128 | .add = ib_mad_init_device, | |
3129 | .remove = ib_mad_remove_device | |
3130 | }; | |
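/*
 * Registering this client makes the IB core call .add for every
 * existing device and for any device added later; .remove runs when a
 * device goes away or the client is unregistered.
 */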
3131 | ||
3132 | static int __init ib_mad_init_module(void) | |
3133 | { | |
3134 | int ret; | |
3135 | ||
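/*
 * Clamp the configured send and receive queue depths to the supported
 * range before any MAD QPs are created; out-of-range values are
 * silently corrected rather than rejected.
 */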
b76aabc3 HR |
3136 | mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); |
3137 | mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); | |
3138 | ||
3139 | mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); | |
3140 | mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); | |
3141 | ||
1da177e4 LT |
3142 | ib_mad_cache = kmem_cache_create("ib_mad", |
3143 | sizeof(struct ib_mad_private), | |
3144 | 0, | |
3145 | SLAB_HWCACHE_ALIGN, | |
1da177e4 LT |
3146 | NULL); |
3147 | if (!ib_mad_cache) { | |
7ef5d4b0 | 3148 | pr_err("Couldn't create ib_mad cache\n"); |
1da177e4 LT |
3149 | ret = -ENOMEM; |
3150 | goto error1; | |
3151 | } | |
3152 | ||
3153 | INIT_LIST_HEAD(&ib_mad_port_list); | |
3154 | ||
3155 | if (ib_register_client(&mad_client)) { | |
7ef5d4b0 | 3156 | pr_err("Couldn't register ib_mad client\n"); |
1da177e4 LT |
3157 | ret = -EINVAL; |
3158 | goto error2; | |
3159 | } | |
3160 | ||
3161 | return 0; | |
3162 | ||
3163 | error2: | |
3164 | kmem_cache_destroy(ib_mad_cache); | |
3165 | error1: | |
3166 | return ret; | |
3167 | } | |
3168 | ||
3169 | static void __exit ib_mad_cleanup_module(void) | |
3170 | { | |
3171 | ib_unregister_client(&mad_client); | |
1a1d92c1 | 3172 | kmem_cache_destroy(ib_mad_cache); |
1da177e4 LT |
3173 | } |
3174 | ||
3175 | module_init(ib_mad_init_module); | |
3176 | module_exit(ib_mad_cleanup_module); |