/* drivers/staging/rdma/amso1100/c2_cm.c — AMSO1100 iWARP connection manager verbs */
1 /*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34 #include <linux/slab.h>
35
36 #include "c2.h"
37 #include "c2_wr.h"
38 #include "c2_vq.h"
39 #include <rdma/iw_cm.h>
40
/*
 * Active-side connect: post a WR_QP_CONNECT work request to the adapter
 * for the QP named by iw_param->qpn.  IPv4 peers only.  The adapter sends
 * no synchronous reply; the connection outcome arrives later through the
 * asynchronous event path.
 *
 * Returns 0 once the work request has been posted, or a negative errno.
 * On any failure the QP <--> cm_id association made here is undone and
 * the cm_id reference is dropped.
 */
int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct c2_dev *c2dev = to_c2dev(cm_id->device);
	struct ib_qp *ibqp;
	struct c2_qp *qp;
	struct c2wr_qp_connect_req *wr;	/* variable size needs a malloc. */
	struct c2_vq_req *vq_req;
	int err;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;

	/* Only IPv4 addressing is supported by this adapter. */
	if (cm_id->remote_addr.ss_family != AF_INET)
		return -ENOSYS;

	ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
	if (!ibqp)
		return -EINVAL;
	qp = to_c2qp(ibqp);

	/* Associate QP <--> CM_ID */
	cm_id->provider_data = qp;
	cm_id->add_ref(cm_id);
	qp->cm_id = cm_id;

	/*
	 * only support the max private_data length
	 */
	if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
		err = -EINVAL;
		goto bail0;
	}
	/*
	 * Set the rdma read limits
	 */
	err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
	if (err)
		goto bail0;

	/*
	 * Create and send a WR_QP_CONNECT...
	 */
	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail0;
	}

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail1;
	}

	c2_wr_set_id(wr, CCWR_QP_CONNECT);
	wr->hdr.context = 0;	/* no synchronous reply, so no context needed */
	wr->rnic_handle = c2dev->adapter_handle;
	wr->qp_handle = qp->adapter_handle;

	/* sockaddr_in fields are already in network byte order; no swap. */
	wr->remote_addr = raddr->sin_addr.s_addr;
	wr->remote_port = raddr->sin_port;

	/*
	 * Move any private data from the callers's buf into
	 * the WR.
	 */
	if (iw_param->private_data) {
		wr->private_data_length =
			cpu_to_be32(iw_param->private_data_len);
		memcpy(&wr->private_data[0], iw_param->private_data,
		       iw_param->private_data_len);
	} else
		wr->private_data_length = 0;

	/*
	 * Send WR to adapter.  NOTE: There is no synch reply from
	 * the adapter.  On success we fall through the labels below
	 * deliberately: vq_req and wr are always released, and the
	 * teardown under bail0 only runs when err != 0.
	 */
	err = vq_send_wr(c2dev, (union c2wr *) wr);
	vq_req_free(c2dev, vq_req);

bail1:
	kfree(wr);
bail0:
	if (err) {
		/*
		 * If we fail, release reference on QP and
		 * disassociate QP from CM_ID
		 */
		cm_id->provider_data = NULL;
		qp->cm_id = NULL;
		cm_id->rem_ref(cm_id);
	}
	return err;
}
134
135 int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
136 {
137 struct c2_dev *c2dev;
138 struct c2wr_ep_listen_create_req wr;
139 struct c2wr_ep_listen_create_rep *reply;
140 struct c2_vq_req *vq_req;
141 int err;
142 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
143
144 if (cm_id->local_addr.ss_family != AF_INET)
145 return -ENOSYS;
146
147 c2dev = to_c2dev(cm_id->device);
148 if (c2dev == NULL)
149 return -EINVAL;
150
151 /*
152 * Allocate verbs request.
153 */
154 vq_req = vq_req_alloc(c2dev);
155 if (!vq_req)
156 return -ENOMEM;
157
158 /*
159 * Build the WR
160 */
161 c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
162 wr.hdr.context = (u64) (unsigned long) vq_req;
163 wr.rnic_handle = c2dev->adapter_handle;
164 wr.local_addr = laddr->sin_addr.s_addr;
165 wr.local_port = laddr->sin_port;
166 wr.backlog = cpu_to_be32(backlog);
167 wr.user_context = (u64) (unsigned long) cm_id;
168
169 /*
170 * Reference the request struct. Dereferenced in the int handler.
171 */
172 vq_req_get(c2dev, vq_req);
173
174 /*
175 * Send WR to adapter
176 */
177 err = vq_send_wr(c2dev, (union c2wr *) & wr);
178 if (err) {
179 vq_req_put(c2dev, vq_req);
180 goto bail0;
181 }
182
183 /*
184 * Wait for reply from adapter
185 */
186 err = vq_wait_for_reply(c2dev, vq_req);
187 if (err)
188 goto bail0;
189
190 /*
191 * Process reply
192 */
193 reply =
194 (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
195 if (!reply) {
196 err = -ENOMEM;
197 goto bail1;
198 }
199
200 if ((err = c2_errno(reply)) != 0)
201 goto bail1;
202
203 /*
204 * Keep the adapter handle. Used in subsequent destroy
205 */
206 cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;
207
208 /*
209 * free vq stuff
210 */
211 vq_repbuf_free(c2dev, reply);
212 vq_req_free(c2dev, vq_req);
213
214 return 0;
215
216 bail1:
217 vq_repbuf_free(c2dev, reply);
218 bail0:
219 vq_req_free(c2dev, vq_req);
220 return err;
221 }
222
223
224 int c2_llp_service_destroy(struct iw_cm_id *cm_id)
225 {
226
227 struct c2_dev *c2dev;
228 struct c2wr_ep_listen_destroy_req wr;
229 struct c2wr_ep_listen_destroy_rep *reply;
230 struct c2_vq_req *vq_req;
231 int err;
232
233 c2dev = to_c2dev(cm_id->device);
234 if (c2dev == NULL)
235 return -EINVAL;
236
237 /*
238 * Allocate verbs request.
239 */
240 vq_req = vq_req_alloc(c2dev);
241 if (!vq_req)
242 return -ENOMEM;
243
244 /*
245 * Build the WR
246 */
247 c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
248 wr.hdr.context = (unsigned long) vq_req;
249 wr.rnic_handle = c2dev->adapter_handle;
250 wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;
251
252 /*
253 * reference the request struct. dereferenced in the int handler.
254 */
255 vq_req_get(c2dev, vq_req);
256
257 /*
258 * Send WR to adapter
259 */
260 err = vq_send_wr(c2dev, (union c2wr *) & wr);
261 if (err) {
262 vq_req_put(c2dev, vq_req);
263 goto bail0;
264 }
265
266 /*
267 * Wait for reply from adapter
268 */
269 err = vq_wait_for_reply(c2dev, vq_req);
270 if (err)
271 goto bail0;
272
273 /*
274 * Process reply
275 */
276 reply=(struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg;
277 if (!reply) {
278 err = -ENOMEM;
279 goto bail0;
280 }
281
282 vq_repbuf_free(c2dev, reply);
283 bail0:
284 vq_req_free(c2dev, vq_req);
285 return err;
286 }
287
288 int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
289 {
290 struct c2_dev *c2dev = to_c2dev(cm_id->device);
291 struct c2_qp *qp;
292 struct ib_qp *ibqp;
293 struct c2wr_cr_accept_req *wr; /* variable length WR */
294 struct c2_vq_req *vq_req;
295 struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */
296 int err;
297
298 ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
299 if (!ibqp)
300 return -EINVAL;
301 qp = to_c2qp(ibqp);
302
303 /* Set the RDMA read limits */
304 err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
305 if (err)
306 goto bail0;
307
308 /* Allocate verbs request. */
309 vq_req = vq_req_alloc(c2dev);
310 if (!vq_req) {
311 err = -ENOMEM;
312 goto bail0;
313 }
314 vq_req->qp = qp;
315 vq_req->cm_id = cm_id;
316 vq_req->event = IW_CM_EVENT_ESTABLISHED;
317
318 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
319 if (!wr) {
320 err = -ENOMEM;
321 goto bail1;
322 }
323
324 /* Build the WR */
325 c2_wr_set_id(wr, CCWR_CR_ACCEPT);
326 wr->hdr.context = (unsigned long) vq_req;
327 wr->rnic_handle = c2dev->adapter_handle;
328 wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
329 wr->qp_handle = qp->adapter_handle;
330
331 /* Replace the cr_handle with the QP after accept */
332 cm_id->provider_data = qp;
333 cm_id->add_ref(cm_id);
334 qp->cm_id = cm_id;
335
336 cm_id->provider_data = qp;
337
338 /* Validate private_data length */
339 if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
340 err = -EINVAL;
341 goto bail1;
342 }
343
344 if (iw_param->private_data) {
345 wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
346 memcpy(&wr->private_data[0],
347 iw_param->private_data, iw_param->private_data_len);
348 } else
349 wr->private_data_length = 0;
350
351 /* Reference the request struct. Dereferenced in the int handler. */
352 vq_req_get(c2dev, vq_req);
353
354 /* Send WR to adapter */
355 err = vq_send_wr(c2dev, (union c2wr *) wr);
356 if (err) {
357 vq_req_put(c2dev, vq_req);
358 goto bail1;
359 }
360
361 /* Wait for reply from adapter */
362 err = vq_wait_for_reply(c2dev, vq_req);
363 if (err)
364 goto bail1;
365
366 /* Check that reply is present */
367 reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
368 if (!reply) {
369 err = -ENOMEM;
370 goto bail1;
371 }
372
373 err = c2_errno(reply);
374 vq_repbuf_free(c2dev, reply);
375
376 if (!err)
377 c2_set_qp_state(qp, C2_QP_STATE_RTS);
378 bail1:
379 kfree(wr);
380 vq_req_free(c2dev, vq_req);
381 bail0:
382 if (err) {
383 /*
384 * If we fail, release reference on QP and
385 * disassociate QP from CM_ID
386 */
387 cm_id->provider_data = NULL;
388 qp->cm_id = NULL;
389 cm_id->rem_ref(cm_id);
390 }
391 return err;
392 }
393
394 int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
395 {
396 struct c2_dev *c2dev;
397 struct c2wr_cr_reject_req wr;
398 struct c2_vq_req *vq_req;
399 struct c2wr_cr_reject_rep *reply;
400 int err;
401
402 c2dev = to_c2dev(cm_id->device);
403
404 /*
405 * Allocate verbs request.
406 */
407 vq_req = vq_req_alloc(c2dev);
408 if (!vq_req)
409 return -ENOMEM;
410
411 /*
412 * Build the WR
413 */
414 c2_wr_set_id(&wr, CCWR_CR_REJECT);
415 wr.hdr.context = (unsigned long) vq_req;
416 wr.rnic_handle = c2dev->adapter_handle;
417 wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;
418
419 /*
420 * reference the request struct. dereferenced in the int handler.
421 */
422 vq_req_get(c2dev, vq_req);
423
424 /*
425 * Send WR to adapter
426 */
427 err = vq_send_wr(c2dev, (union c2wr *) & wr);
428 if (err) {
429 vq_req_put(c2dev, vq_req);
430 goto bail0;
431 }
432
433 /*
434 * Wait for reply from adapter
435 */
436 err = vq_wait_for_reply(c2dev, vq_req);
437 if (err)
438 goto bail0;
439
440 /*
441 * Process reply
442 */
443 reply = (struct c2wr_cr_reject_rep *) (unsigned long)
444 vq_req->reply_msg;
445 if (!reply) {
446 err = -ENOMEM;
447 goto bail0;
448 }
449 err = c2_errno(reply);
450 /*
451 * free vq stuff
452 */
453 vq_repbuf_free(c2dev, reply);
454
455 bail0:
456 vq_req_free(c2dev, vq_req);
457 return err;
458 }