/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

int peer2peer = 0;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
		 "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0644);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);

static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		printk(KERN_ERR "%s timer stopped when its not running! ep %p state %u\n",
			__func__, ep, ep->com.state);
		WARN_ON(1);
		return;
	}
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}

static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = l2t_send(tdev, skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = cxgb3_ofld_send(tdev, skb);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_cxgb3_ofld_send(tdev, skb);
	return;
}

int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __func__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}

static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void __free_ep(struct kref *kref)
{
	struct iwch_ep *ep;
	ep = container_of(container_of(kref, struct iwch_ep_common, kref),
			  struct iwch_ep, com);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			  }
	};

	rt = ip_route_output_flow(&init_net, &fl, NULL);
	if (IS_ERR(rt))
		return NULL;
	return rt;
}

static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __func__, dev);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __func__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(dev, skb);
}

static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
	       V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}

static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __func__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}

static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __func__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}

static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __func__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD) {
		get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	iwch_cxgb3_ofld_send(ep->com.tdev, skb);
	return credits;
}

static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer. If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.initiator = 1;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
		iwch_post_zb_read(ep->com.qp);
	}

	goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer. If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us its just the single MPA request or reply. We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);

	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p state %u\n",
		     __func__, ep, state_read(&ep->com));
		return CPL_RET_BUF_DONE;
	}

	BUG_ON(credits != 1);
	dst_confirm(ep->dst);
	if (!ep->mpa_skb) {
		PDBG("%s rdma_init wr_ack ep %p state %u\n",
		     __func__, ep, state_read(&ep->com));
		if (ep->mpa_attr.initiator) {
			PDBG("%s initiator ep %p state %u\n",
			     __func__, ep, state_read(&ep->com));
			if (peer2peer)
				iwch_post_zb_read(ep->com.qp);
		} else {
			PDBG("%s responder ep %p state %u\n",
			     __func__, ep, state_read(&ep->com));
			ep->com.rpl_done = 1;
			wake_up(&ep->com.waitq);
		}
	} else {
		PDBG("%s lsm ack ep %p state %u freeing skb\n",
		     __func__, ep, state_read(&ep->com));
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return CPL_RET_BUF_DONE;
}

static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/*
	 * We get 2 abort replies from the HW. The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case ABORTING:
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}

static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = 1;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return CPL_RET_BUF_DONE;
}

static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->cpu_idx = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = 1;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}

static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
	       V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2; /* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type != T3A)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		iwch_cxgb3_ofld_send(tdev, skb);
	}
}

static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR "%s bad dst mac %pM\n",
			__func__, req->dst_mac);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->dst;
	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		l2t_release(L2DATA(tdev), l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}

static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}

static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned long flags;

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	/*
	 * We get 2 peer aborts from the HW. The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		spin_unlock_irqrestore(&ep->com.lock, flags);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		release = 1;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
					     ep->com.qp,
					     IWCH_QP_ATTR_NEXT_STATE,
					     &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for later consumer consumption.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	if (state_read(&ep->com) != FPDU_MODE)
		return CPL_RET_BUF_DONE;

	PDBG("%s ep %p\n", __func__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}

static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __func__, ep->hwtid);
		stop_ep_timer(ep);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}

static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int abort = 1;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		printk(KERN_ERR "%s unexpected state ep %p state %u\n",
			__func__, ep, ep->com.state);
		WARN_ON(1);
		abort = 0;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (abort)
		abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}
1763
1764int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1765{
1766 int err;
1767 struct iwch_ep *ep = to_ep(cm_id);
33718363 1768 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
b038ced7
SW
1769
1770 if (state_read(&ep->com) == DEAD) {
1771 put_ep(&ep->com);
1772 return -ECONNRESET;
1773 }
1774 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
b038ced7
SW
1775 if (mpa_rev == 0)
1776 abort_connection(ep, NULL, GFP_KERNEL);
1777 else {
1778 err = send_mpa_reject(ep, pdata, pdata_len);
7d526e6b 1779 err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
b038ced7 1780 }
6e47fe43 1781 put_ep(&ep->com);
b038ced7
SW
1782 return 0;
1783}
1784
1785int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1786{
1787 int err;
1788 struct iwch_qp_attributes attrs;
1789 enum iwch_qp_attr_mask mask;
1790 struct iwch_ep *ep = to_ep(cm_id);
1791 struct iwch_dev *h = to_iwch_dev(cm_id->device);
1792 struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
1793
33718363 1794 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1795 if (state_read(&ep->com) == DEAD) {
1796 err = -ECONNRESET;
1797 goto err;
1798 }
1799
1800 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1801 BUG_ON(!qp);
1802
1803 if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
1804 (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
1805 abort_connection(ep, NULL, GFP_KERNEL);
1806 err = -EINVAL;
1807 goto err;
1808 }
1809
1810 cm_id->add_ref(cm_id);
1811 ep->com.cm_id = cm_id;
1812 ep->com.qp = qp;
1813
1814 ep->ird = conn_param->ird;
1815 ep->ord = conn_param->ord;
1816
1817 if (peer2peer && ep->ird == 0)
1818 ep->ird = 1;
1819
33718363 1820 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
de3d3530 1821
1822 /* bind QP to EP and move to RTS */
1823 attrs.mpa_attr = ep->mpa_attr;
1f71f503 1824 attrs.max_ird = ep->ird;
1825 attrs.max_ord = ep->ord;
1826 attrs.llp_stream_handle = ep;
1827 attrs.next_state = IWCH_QP_STATE_RTS;
1828
1829 /* bind QP and TID with INIT_WR */
1830 mask = IWCH_QP_ATTR_NEXT_STATE |
1831 IWCH_QP_ATTR_LLP_STREAM_HANDLE |
1832 IWCH_QP_ATTR_MPA_ATTR |
1833 IWCH_QP_ATTR_MAX_IRD |
1834 IWCH_QP_ATTR_MAX_ORD;
1835
1836 err = iwch_modify_qp(ep->com.qp->rhp,
1837 ep->com.qp, mask, &attrs, 1);
de3d3530 1838 if (err)
6e47fe43 1839 goto err1;
b038ced7 1840
1841 /* if needed, wait for wr_ack */
1842 if (iwch_rqes_posted(qp)) {
1843 wait_event(ep->com.waitq, ep->com.rpl_done);
1844 err = ep->com.rpl_err;
1845 if (err)
6e47fe43 1846 goto err1;
1847 }
1848
1849 err = send_mpa_reply(ep, conn_param->private_data,
1850 conn_param->private_data_len);
1851 if (err)
6e47fe43 1852 goto err1;
de3d3530 1853
1854
1855 state_set(&ep->com, FPDU_MODE);
1856 established_upcall(ep);
1857 put_ep(&ep->com);
1858 return 0;
6e47fe43 1859err1:
1860 ep->com.cm_id = NULL;
1861 ep->com.qp = NULL;
1862 cm_id->rem_ref(cm_id);
6e47fe43 1863err:
1864 put_ep(&ep->com);
1865 return err;
1866}
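/*
 * Editor's note: a sketch (not driver code) of the "attribute struct plus
 * validity mask" idiom iwch_accept_cr() uses above when it moves the QP to
 * RTS: the caller fills in only the fields it cares about and sets the
 * matching bits so the modify routine touches nothing else.  The flag names
 * and apply_attrs() are invented for illustration.
 */
#include <stdint.h>

enum {
	ATTR_NEXT_STATE = 1u << 0,
	ATTR_MAX_IRD    = 1u << 1,
	ATTR_MAX_ORD    = 1u << 2,
};

struct qp_attrs {
	int next_state;
	int max_ird;
	int max_ord;
};

void apply_attrs(struct qp_attrs *qp, const struct qp_attrs *req, uint32_t mask)
{
	if (mask & ATTR_NEXT_STATE)
		qp->next_state = req->next_state;
	if (mask & ATTR_MAX_IRD)
		qp->max_ird = req->max_ird;
	if (mask & ATTR_MAX_ORD)
		qp->max_ord = req->max_ord;
}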
1867
1868static int is_loopback_dst(struct iw_cm_id *cm_id)
1869{
1870 struct net_device *dev;
1871
1872 dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
1873 if (!dev)
1874 return 0;
1875 dev_put(dev);
1876 return 1;
1877}
1878
1879int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1880{
1881 int err = 0;
1882 struct iwch_dev *h = to_iwch_dev(cm_id->device);
1883 struct iwch_ep *ep;
1884 struct rtable *rt;
1885
1886 if (is_loopback_dst(cm_id)) {
1887 err = -ENOSYS;
1888 goto out;
1889 }
1890
1891 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1892 if (!ep) {
33718363 1893 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
1894 err = -ENOMEM;
1895 goto out;
1896 }
1897 init_timer(&ep->timer);
1898 ep->plen = conn_param->private_data_len;
1899 if (ep->plen)
1900 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
1901 conn_param->private_data, ep->plen);
1902 ep->ird = conn_param->ird;
1903 ep->ord = conn_param->ord;
1904
1905 if (peer2peer && ep->ord == 0)
1906 ep->ord = 1;
1907
1908 ep->com.tdev = h->rdev.t3cdev_p;
1909
1910 cm_id->add_ref(cm_id);
1911 ep->com.cm_id = cm_id;
1912 ep->com.qp = get_qhp(h, conn_param->qpn);
1913 BUG_ON(!ep->com.qp);
33718363 1914 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
1915 ep->com.qp, cm_id);
1916
1917 /*
1918 * Allocate an active TID to initiate a TCP connection.
1919 */
1920 ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
1921 if (ep->atid == -1) {
33718363 1922 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
1923 err = -ENOMEM;
1924 goto fail2;
1925 }
1926
1927 /* find a route */
1928 rt = find_route(h->rdev.t3cdev_p,
1929 cm_id->local_addr.sin_addr.s_addr,
1930 cm_id->remote_addr.sin_addr.s_addr,
1931 cm_id->local_addr.sin_port,
1932 cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
1933 if (!rt) {
33718363 1934 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
1935 err = -EHOSTUNREACH;
1936 goto fail3;
1937 }
d8d1f30b 1938 ep->dst = &rt->dst;
1939
1940 /* get a l2t entry */
1941 ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
1942 ep->dst->neighbour->dev);
1943 if (!ep->l2t) {
33718363 1944 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1945 err = -ENOMEM;
1946 goto fail4;
1947 }
1948
1949 state_set(&ep->com, CONNECTING);
1950 ep->tos = IPTOS_LOWDELAY;
1951 ep->com.local_addr = cm_id->local_addr;
1952 ep->com.remote_addr = cm_id->remote_addr;
1953
1954 /* send connect request to rnic */
1955 err = send_connect(ep);
1956 if (!err)
1957 goto out;
1958
1959 l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
1960fail4:
1961 dst_release(ep->dst);
1962fail3:
1963 cxgb3_free_atid(ep->com.tdev, ep->atid);
1964fail2:
dc35fac9 1965 cm_id->rem_ref(cm_id);
1966 put_ep(&ep->com);
1967out:
1968 return err;
1969}
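/*
 * Editor's note: a sketch (not driver code) of the reverse-order unwind that
 * iwch_connect() performs above through its fail2/fail3/fail4 labels:
 * resources are acquired in order and released in the opposite order when a
 * later step fails.  The resources and names below are invented for
 * illustration.
 */
#include <stdlib.h>

struct conn_res {
	void *ep;	/* stands in for the endpoint */
	void *tid;	/* stands in for the hardware TID */
	void *route;	/* stands in for the route/L2T entry */
};

int setup_conn(struct conn_res *c)
{
	c->ep = malloc(16);
	if (!c->ep)
		goto fail0;
	c->tid = malloc(16);
	if (!c->tid)
		goto fail1;
	c->route = malloc(16);
	if (!c->route)
		goto fail2;
	return 0;		/* success: keep everything */

fail2:
	free(c->tid);
fail1:
	free(c->ep);
fail0:
	return -1;
}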
1970
1971int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
1972{
1973 int err = 0;
1974 struct iwch_dev *h = to_iwch_dev(cm_id->device);
1975 struct iwch_listen_ep *ep;
1976
1977
1978 might_sleep();
1979
1980 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1981 if (!ep) {
33718363 1982 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
1983 err = -ENOMEM;
1984 goto fail1;
1985 }
33718363 1986 PDBG("%s ep %p\n", __func__, ep);
1987 ep->com.tdev = h->rdev.t3cdev_p;
1988 cm_id->add_ref(cm_id);
1989 ep->com.cm_id = cm_id;
1990 ep->backlog = backlog;
1991 ep->com.local_addr = cm_id->local_addr;
1992
1993 /*
1994 * Allocate a server TID.
1995 */
1996 ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
1997 if (ep->stid == -1) {
33718363 1998 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
1999 err = -ENOMEM;
2000 goto fail2;
2001 }
2002
2003 state_set(&ep->com, LISTEN);
2004 err = listen_start(ep);
2005 if (err)
2006 goto fail3;
2007
2008 /* wait for pass_open_rpl */
2009 wait_event(ep->com.waitq, ep->com.rpl_done);
2010 err = ep->com.rpl_err;
2011 if (!err) {
2012 cm_id->provider_data = ep;
2013 goto out;
2014 }
2015fail3:
2016 cxgb3_free_stid(ep->com.tdev, ep->stid);
2017fail2:
1b07db70 2018 cm_id->rem_ref(cm_id);
2019 put_ep(&ep->com);
2020fail1:
2021out:
2022 return err;
2023}
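/*
 * Editor's note: a sketch (not driver code) of the completion pattern used
 * above: iwch_create_listen() starts the hardware listen, then sleeps on
 * ep->com.waitq until the PASS_OPEN_RPL handler sets rpl_done/rpl_err.  The
 * same "kick it off, wait for the reply handler to flag completion" idea is
 * recast here with pthreads purely as a user-space illustration.
 */
#include <pthread.h>

struct reply_wait {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int done;
	int err;
};

void reply_complete(struct reply_wait *w, int err)	/* reply handler side */
{
	pthread_mutex_lock(&w->lock);
	w->err = err;
	w->done = 1;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

int reply_wait_for(struct reply_wait *w)		/* submitter side */
{
	int err;

	pthread_mutex_lock(&w->lock);
	while (!w->done)
		pthread_cond_wait(&w->cond, &w->lock);
	err = w->err;
	pthread_mutex_unlock(&w->lock);
	return err;
}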
2024
2025int iwch_destroy_listen(struct iw_cm_id *cm_id)
2026{
2027 int err;
2028 struct iwch_listen_ep *ep = to_listen_ep(cm_id);
2029
33718363 2030 PDBG("%s ep %p\n", __func__, ep);
2031
2032 might_sleep();
2033 state_set(&ep->com, DEAD);
2034 ep->com.rpl_done = 0;
2035 ep->com.rpl_err = 0;
2036 err = listen_stop(ep);
2037 if (err)
2038 goto done;
2039 wait_event(ep->com.waitq, ep->com.rpl_done);
2040 cxgb3_free_stid(ep->com.tdev, ep->stid);
04b5d028 2041done:
2042 err = ep->com.rpl_err;
2043 cm_id->rem_ref(cm_id);
2044 put_ep(&ep->com);
2045 return err;
2046}
2047
2048int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
2049{
2050 int ret = 0;
2051 unsigned long flags;
2052 int close = 0;
2053 int fatal = 0;
2054 struct t3cdev *tdev;
2055 struct cxio_rdev *rdev;
2056
2057 spin_lock_irqsave(&ep->com.lock, flags);
2058
33718363 2059 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
2060 states[ep->com.state], abrupt);
2061
2062 tdev = (struct t3cdev *)ep->com.tdev;
2063 rdev = (struct cxio_rdev *)tdev->ulp;
2064 if (cxio_fatal_error(rdev)) {
2065 fatal = 1;
2066 close_complete_upcall(ep);
2067 ep->com.state = DEAD;
2068 }
2069 switch (ep->com.state) {
2070 case MPA_REQ_WAIT:
2071 case MPA_REQ_SENT:
2072 case MPA_REQ_RCVD:
2073 case MPA_REP_SENT:
2074 case FPDU_MODE:
b038ced7 2075 close = 1;
2076 if (abrupt)
2077 ep->com.state = ABORTING;
2078 else {
2079 ep->com.state = CLOSING;
2080 start_ep_timer(ep);
2081 }
6e47fe43 2082 set_bit(CLOSE_SENT, &ep->com.flags);
2083 break;
2084 case CLOSING:
2085 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2086 close = 1;
2087 if (abrupt) {
2088 stop_ep_timer(ep);
2089 ep->com.state = ABORTING;
2090 } else
2091 ep->com.state = MORIBUND;
2092 }
2093 break;
2094 case MORIBUND:
2095 case ABORTING:
2096 case DEAD:
2097 PDBG("%s ignoring disconnect ep %p state %u\n",
2098 __func__, ep, ep->com.state);
2099 break;
2100 default:
2101 BUG();
2102 break;
2103 }
989a1780 2104
2105 spin_unlock_irqrestore(&ep->com.lock, flags);
2106 if (close) {
2107 if (abrupt)
2108 ret = send_abort(ep, NULL, gfp);
2109 else
2110 ret = send_halfclose(ep, gfp);
2111 if (ret)
2112 fatal = 1;
b038ced7 2113 }
2114 if (fatal)
2115 release_ep_resources(ep);
2116 return ret;
2117}
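/*
 * Editor's note: a sketch (not driver code) of the guard that CLOSE_SENT
 * provides in iwch_ep_disconnect() above: even if a local disconnect races
 * with a peer close, only the first path to set the bit actually issues the
 * half-close.  C11 atomics stand in for the kernel's test_and_set_bit().
 */
#include <stdatomic.h>
#include <stdbool.h>

atomic_flag close_sent = ATOMIC_FLAG_INIT;

bool try_issue_close(void)
{
	/* True only for the first caller; later callers find the flag set. */
	return !atomic_flag_test_and_set(&close_sent);
}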
2118
2119int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2120 struct l2t_entry *l2t)
2121{
2122 struct iwch_ep *ep = ctx;
2123
2124 if (ep->dst != old)
2125 return 0;
2126
33718363 2127 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2128 l2t);
2129 dst_hold(new);
2130 l2t_release(L2DATA(ep->com.tdev), ep->l2t);
2131 ep->l2t = l2t;
2132 dst_release(old);
2133 ep->dst = new;
2134 return 1;
2135}
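/*
 * Editor's note: a sketch (not driver code) of the ordering iwch_ep_redirect()
 * follows above: swap in the new destination only if the endpoint still uses
 * the old one, and take the new reference before dropping the old so the
 * object can never reach zero while it is still installed.  The refcounted
 * object and helpers are invented for illustration.
 */
#include <stdatomic.h>

struct ref_obj {
	atomic_int refcnt;
};

void obj_hold(struct ref_obj *o)    { atomic_fetch_add(&o->refcnt, 1); }
void obj_release(struct ref_obj *o) { atomic_fetch_sub(&o->refcnt, 1); }

int redirect_dst(struct ref_obj **slot, struct ref_obj *old, struct ref_obj *new)
{
	if (*slot != old)
		return 0;	/* endpoint already points elsewhere */
	obj_hold(new);		/* grab the new reference first */
	obj_release(old);	/* then drop the old one */
	*slot = new;
	return 1;
}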
2136
2137/*
2138 * All the CM events are handled on a work queue to have a safe context.
617c9a7e 2139 * These are the real handlers that are called from the work queue.
b038ced7 2140 */
2141static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
2142 [CPL_ACT_ESTABLISH] = act_establish,
2143 [CPL_ACT_OPEN_RPL] = act_open_rpl,
2144 [CPL_RX_DATA] = rx_data,
2145 [CPL_TX_DMA_ACK] = tx_ack,
2146 [CPL_ABORT_RPL_RSS] = abort_rpl,
2147 [CPL_ABORT_RPL] = abort_rpl,
2148 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
2149 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
2150 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
2151 [CPL_PASS_ESTABLISH] = pass_establish,
2152 [CPL_PEER_CLOSE] = peer_close,
2153 [CPL_ABORT_REQ_RSS] = peer_abort,
2154 [CPL_CLOSE_CON_RPL] = close_con_rpl,
2155 [CPL_RDMA_TERMINATE] = terminate,
2156 [CPL_RDMA_EC_STATUS] = ec_status,
2157};
2158
2159static void process_work(struct work_struct *work)
2160{
2161 struct sk_buff *skb = NULL;
2162 void *ep;
2163 struct t3cdev *tdev;
2164 int ret;
2165
2166 while ((skb = skb_dequeue(&rxq))) {
2167 ep = *((void **) (skb->cb));
2168 tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
2169 ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
2170 if (ret & CPL_RET_BUF_DONE)
2171 kfree_skb(skb);
2172
2173 /*
2174 * ep was referenced in sched(), and is freed here.
2175 */
2176 put_ep((struct iwch_ep_common *)ep);
2177 }
2178}
2179
2180static DECLARE_WORK(skb_work, process_work);
2181
2182static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2183{
2184 struct iwch_ep_common *epc = ctx;
2185
2186 get_ep(epc);
2187
2188 /*
2189 * Save ctx and tdev in the skb->cb area.
2190 */
2191 *((void **) skb->cb) = ctx;
2192 *((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;
2193
2194 /*
2195 * Queue the skb and schedule the worker thread.
2196 */
2197 skb_queue_tail(&rxq, skb);
2198 queue_work(workq, &skb_work);
2199 return 0;
2200}
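/*
 * Editor's note: a sketch (not driver code) of the defer-and-dispatch scheme
 * implemented by sched() and process_work() above: the context pointer rides
 * along with the queued message, and a table indexed by opcode picks the real
 * handler later, in a safe context.  The queue, opcodes and handlers below
 * are invented for illustration.
 */
#include <stdio.h>

enum { OP_ESTABLISH, OP_CLOSE, NUM_OPS };

struct msg {
	int opcode;
	void *ctx;		/* endpoint pointer travels with the message */
	struct msg *next;
};

typedef int (*handler_fn)(struct msg *m);

static int handle_establish(struct msg *m) { printf("establish %p\n", m->ctx); return 0; }
static int handle_close(struct msg *m)     { printf("close %p\n", m->ctx);     return 0; }

static handler_fn handlers[NUM_OPS] = {
	[OP_ESTABLISH] = handle_establish,
	[OP_CLOSE]     = handle_close,
};

/* Drain the queue in process context, one handler call per message. */
void process_queue(struct msg *head)
{
	struct msg *m;

	for (m = head; m; m = m->next)
		handlers[m->opcode](m);
}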
2201
2202static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2203{
2204 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
2205
2206 if (rpl->status != CPL_ERR_NONE) {
2207 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
2208 "for tid %u\n", rpl->status, GET_TID(rpl));
2209 }
2210 return CPL_RET_BUF_DONE;
2211}
2212
2213/*
2214 * All upcalls from the T3 Core go to sched() to schedule the
2215 * processing on a work queue.
2216 */
2217cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
2218 [CPL_ACT_ESTABLISH] = sched,
2219 [CPL_ACT_OPEN_RPL] = sched,
2220 [CPL_RX_DATA] = sched,
2221 [CPL_TX_DMA_ACK] = sched,
2222 [CPL_ABORT_RPL_RSS] = sched,
2223 [CPL_ABORT_RPL] = sched,
2224 [CPL_PASS_OPEN_RPL] = sched,
2225 [CPL_CLOSE_LISTSRV_RPL] = sched,
2226 [CPL_PASS_ACCEPT_REQ] = sched,
2227 [CPL_PASS_ESTABLISH] = sched,
2228 [CPL_PEER_CLOSE] = sched,
2229 [CPL_CLOSE_CON_RPL] = sched,
2230 [CPL_ABORT_REQ_RSS] = sched,
2231 [CPL_RDMA_TERMINATE] = sched,
2232 [CPL_RDMA_EC_STATUS] = sched,
2233 [CPL_SET_TCB_RPL] = set_tcb_rpl,
2234};
2235
2236int __init iwch_cm_init(void)
2237{
2238 skb_queue_head_init(&rxq);
2239
2240 workq = create_singlethread_workqueue("iw_cxgb3");
2241 if (!workq)
2242 return -ENOMEM;
2243
2244 return 0;
2245}
2246
2247void __exit iwch_cm_term(void)
2248{
2249 flush_workqueue(workq);
2250 destroy_workqueue(workq);
2251}