/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include <libcxgb_cm.h>
#include "iw_cxgb4.h"
#include "clip_tbl.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(struct timer_list *t);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

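/*
 * The endpoint timer holds a reference on the ep: start_ep_timer() takes
 * it, and the first path to set the TIMEOUT flag (stop_ep_timer() here,
 * or the expiry handler) is responsible for dropping it, so the
 * reference is only put once.
 */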
static void start_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("ep %p\n", ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("ep %p stopping\n", ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

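/*
 * Low-level transmit helpers. Both return 0 on success and a negative
 * errno on failure, and both consume the skb: it is freed on any error,
 * including when the device is in a fatal error state.
 */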
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_err("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	else if (error == NET_XMIT_DROP)
		return -ENOMEM;
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_err("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);

	skb = get_skb(skb, len, GFP_KERNEL);
	if (!skb)
		return;

	cxgb_mk_tid_release(skb, len, hwtid, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

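/*
 * Derive the effective MSS for this connection: take the negotiated MTU
 * from the adapter's MTU table, subtract the IPv4/IPv6 and TCP header
 * sizes, and account for the TCP timestamp option if it was negotiated.
 * The result is clamped to a floor of 128 bytes.
 */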
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			 TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
		 ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	pr_debug("%s -> %s\n", states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}

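/*
 * Pre-allocate skbs for the control messages sent on this endpoint
 * (flowc, close, abort): send_flowc(), send_halfclose() and
 * send_abort_req() dequeue from this list, so the teardown paths do not
 * depend on skb allocation succeeding.
 */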
static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
{
	struct sk_buff *skb;
	unsigned int i;
	size_t len;

	len = roundup(sizeof(union cpl_wr_size), 16);
	for (i = 0; i < size; i++) {
		skb = alloc_skb(len, GFP_KERNEL);
		if (!skb)
			goto fail;
		skb_queue_tail(ep_skb_list, skb);
	}
	return 0;
fail:
	skb_queue_purge(ep_skb_list);
	return -ENOMEM;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		epc->wr_waitp = c4iw_alloc_wr_wait(gfp);
		if (!epc->wr_waitp) {
			kfree(epc);
			epc = NULL;
			goto out;
		}
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(epc->wr_waitp);
	}
	pr_debug("alloc ep %p\n", epc);
out:
	return epc;
}

static void remove_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
	if (idr_is_empty(&ep->com.dev->hwtid_idr))
		wake_up(&ep->com.dev->wait);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

static void insert_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

/*
 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
 */
static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
{
	struct c4iw_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->hwtid_idr, tid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

/*
 * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
 */
static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
					       unsigned int stid)
{
	struct c4iw_listen_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->stid_idr, stid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

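/*
 * Final kref release: undo the hardware state (CLIP entry, hardware TID,
 * route and L2T entry) if resources were marked for release, purge any
 * unused pre-allocated skbs, and free the ep itself.
 */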
void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	pr_debug("ep %p state %s\n", ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
				 ep->com.local_addr.ss_family);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		kfree_skb(ep->mpa_skb);
	}
	if (!skb_queue_empty(&ep->com.ep_skb_list))
		skb_queue_purge(&ep->com.ep_skb_list);
	c4iw_put_wr_wait(ep->com.wr_waitp);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);

	/*
	 * If we have a hwtid, then remove it from the idr table
	 * so lookups will no longer find this endpoint. Otherwise
	 * we have a race where one thread finds the ep ptr just
	 * before the other thread is freeing the ep memory.
	 */
	if (ep->hwtid != -1)
		remove_ep_tid(ep);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure\n");
	kfree_skb(skb);
}

static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
}

enum {
	NUM_FAKE_CPLS = 2,
	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
};

static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	release_ep_resources(ep);
	kfree_skb(skb);
	return 0;
}

static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	c4iw_put_ep(&ep->parent_ep->com);
	release_ep_resources(ep);
	kfree_skb(skb);
	return 0;
}

/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources. This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
				  int cpl)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	/* Set our special ARP_FAILURE opcode */
	rpl->ot.opcode = cpl;

	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
	 * ptr.
	 */
	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
	sched(ep->com.dev, skb);
}

/* Handle an ARP failure for an accept */
static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during accept - tid %u - dropping connection\n",
	       ep->hwtid);

	__state_set(&ep->com, DEAD);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during connect\n");
	connect_reply_upcall(ep, -EHOSTUNREACH);
	__state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	int ret;
	struct c4iw_ep *ep = handle;
	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("rdev %p\n", rdev);
	req->cmd = CPL_ABORT_NO_RST;
	skb_get(skb);
	ret = c4iw_ofld_send(rdev, skb);
	if (ret) {
		__state_set(&ep->com, DEAD);
		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
	} else
		kfree_skb(skb);
}

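/*
 * Send the FW_FLOWC_WR that binds this connection's parameters (channel,
 * ingress queue, sequence numbers, send window, MSS, receive scale) to
 * the hardware flow. A tenth mnemonic carrying the VLAN priority is
 * added when the L2T entry has a VLAN tag.
 */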
static int send_flowc(struct c4iw_ep *ep)
{
	struct fw_flowc_wr *flowc;
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	u16 vlan = ep->l2t->vlan;
	int nparams;
	int flowclen, flowclen16;

	if (WARN_ON(!skb))
		return -ENOMEM;

	if (vlan == CPL_L2T_VLAN_NONE)
		nparams = 9;
	else
		nparams = 10;

	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;

	flowc = __skb_put(skb, flowclen);
	memset(flowc, 0, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
	flowc->mnemval[8].val = cpu_to_be32(ep->snd_wscale);
	if (nparams == 10) {
		u16 pri;

		pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
		flowc->mnemval[9].val = cpu_to_be32(pri);
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep)
{
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	if (WARN_ON(!skb))
		return -ENOMEM;

	cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
			      NULL, arp_failure_discard);

	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static void read_tcb(struct c4iw_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_get_tcb *req;
	int wrlen = roundup(sizeof(*req), 16);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (WARN_ON(!skb))
		return;

	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	req = (struct cpl_get_tcb *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid));
	req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid));

	/*
	 * keep a ref on the ep so the tcb is not unlocked before this
	 * cpl completes. The ref is released in read_tcb_rpl().
	 */
	c4iw_get_ep(&ep->com);
	if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb)))
		c4iw_put_ep(&ep->com);
}

static int send_abort_req(struct c4iw_ep *ep)
{
	u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	if (WARN_ON(!req_skb))
		return -ENOMEM;

	cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
			  ep, abort_arp_failure);

	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}

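/*
 * If the QP is not backed by an SRQ, abort immediately. Otherwise fetch
 * the TCB first, flagging the ep with ABORT_REQ_IN_PROGRESS; the abort
 * is then completed from the CPL_GET_TCB reply path (see read_tcb()).
 */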
static int send_abort(struct c4iw_ep *ep)
{
	if (!ep->com.qp || !ep->com.qp->srq) {
		send_abort_req(ep);
		return 0;
	}
	set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags);
	read_tcb(ep);
	return 0;
}

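/*
 * Build and send the active open request. The CPL layout differs by
 * adapter generation (T4/T5/T6) and by address family, hence the six
 * request variants below; on T5 and later a randomized initial send
 * sequence number is also supplied.
 */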
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req = NULL;
	struct cpl_t5_act_open_req *t5req = NULL;
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req6 *req6 = NULL;
	struct cpl_t5_act_open_req6 *t5req6 = NULL;
	struct cpl_t6_act_open_req6 *t6req6 = NULL;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	u32 wscale;
	int win, sizev4, sizev6, wrlen;
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.remote_addr;
	int ret;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
	u32 isn = (prandom_u32() & ~7UL) - 1;
	struct net_device *netdev;
	u64 params;

	netdev = ep->com.dev->rdev.lldi.ports[0];

	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
	case CHELSIO_T4:
		sizev4 = sizeof(struct cpl_act_open_req);
		sizev6 = sizeof(struct cpl_act_open_req6);
		break;
	case CHELSIO_T5:
		sizev4 = sizeof(struct cpl_t5_act_open_req);
		sizev6 = sizeof(struct cpl_t5_act_open_req6);
		break;
	case CHELSIO_T6:
		sizev4 = sizeof(struct cpl_t6_act_open_req);
		sizev6 = sizeof(struct cpl_t6_act_open_req6);
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(adapter_type));
		return -EINVAL;
	}

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	pr_debug("ep %p atid %u\n", ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		      enable_tcp_timestamps,
		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		if (peer2peer)
			isn += 4;

		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
	}

	params = cxgb4_select_ntuple(netdev, ep->l2t);

	if (ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&la6->sin6_addr.s6_addr, 1);

	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (ep->com.remote_addr.ss_family == AF_INET) {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req = skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			break;
		case CHELSIO_T5:
			t5req = skb_put(skb, wrlen);
			INIT_TP_WR(t5req, 0);
			req = (struct cpl_act_open_req *)t5req;
			break;
		case CHELSIO_T6:
			t6req = skb_put(skb, wrlen);
			INIT_TP_WR(t6req, 0);
			req = (struct cpl_act_open_req *)t6req;
			t5req = (struct cpl_t5_act_open_req *)t6req;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid<<14) | ep->atid)));
		req->local_port = la->sin_port;
		req->peer_port = ra->sin_port;
		req->local_ip = la->sin_addr.s_addr;
		req->peer_ip = ra->sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req->params = cpu_to_be32(params);
			req->opt2 = cpu_to_be32(opt2);
		} else {
			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
				t5req->params =
					  cpu_to_be64(FILTER_TUPLE_V(params));
				t5req->rsvd = cpu_to_be32(isn);
				pr_debug("snd_isn %u\n", t5req->rsvd);
				t5req->opt2 = cpu_to_be32(opt2);
			} else {
				t6req->params =
					  cpu_to_be64(FILTER_TUPLE_V(params));
				t6req->rsvd = cpu_to_be32(isn);
				pr_debug("snd_isn %u\n", t6req->rsvd);
				t6req->opt2 = cpu_to_be32(opt2);
			}
		}
	} else {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req6 = skb_put(skb, wrlen);
			INIT_TP_WR(req6, 0);
			break;
		case CHELSIO_T5:
			t5req6 = skb_put(skb, wrlen);
			INIT_TP_WR(t5req6, 0);
			req6 = (struct cpl_act_open_req6 *)t5req6;
			break;
		case CHELSIO_T6:
			t6req6 = skb_put(skb, wrlen);
			INIT_TP_WR(t6req6, 0);
			req6 = (struct cpl_act_open_req6 *)t6req6;
			t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid<<14)|ep->atid)));
		req6->local_port = la6->sin6_port;
		req6->peer_port = ra6->sin6_port;
		req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
		req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
		req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
		req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
		req6->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req6->params = cpu_to_be32(cxgb4_select_ntuple(netdev,
								      ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		} else {
			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
				t5req6->params =
					    cpu_to_be64(FILTER_TUPLE_V(params));
				t5req6->rsvd = cpu_to_be32(isn);
				pr_debug("snd_isn %u\n", t5req6->rsvd);
				t5req6->opt2 = cpu_to_be32(opt2);
			} else {
				t6req6->params =
					    cpu_to_be64(FILTER_TUPLE_V(params));
				t6req6->rsvd = cpu_to_be32(isn);
				pr_debug("snd_isn %u\n", t6req6->rsvd);
				t6req6->opt2 = cpu_to_be32(opt2);
			}

		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
clip_release:
	if (ret && ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&la6->sin6_addr.s6_addr, 1);
	return ret;
}

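/*
 * Send the MPA-start request as immediate data of a FW_OFLD_TX_DATA_WR.
 * For MPA revision 2 an mpa_v2_conn_params block carrying IRD/ORD and
 * the peer2peer RTR mode is prepended to any private data.
 */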
static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			u8 mpa_rev_to_use)
{
	int mpalen, wrlen, ret;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("ep %p tid %u pd_len %d\n",
		 ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = skb_put_zero(skb, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));

	mpa->flags = 0;
	if (crc_enabled)
		mpa->flags |= MPA_CRC;
	if (markers_enabled) {
		mpa->flags |= MPA_MARKERS;
		ep->mpa_attr.recv_marker_enabled = 1;
	} else {
		ep->mpa_attr.recv_marker_enabled = 0;
	}
	if (mpa_rev_to_use == 2)
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;

	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		pr_debug("initiator ird %u ord %u\n", ep->ird,
			 ep->ord);
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	if (ret)
		return ret;
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return ret;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("ep %p tid %u pd_len %d\n",
		 ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = skb_put_zero(skb, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("ep %p tid %u pd_len %d\n",
		 ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = skb_put_zero(skb, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = 0;
	if (ep->mpa_attr.crc_enabled)
		mpa->flags |= MPA_CRC;
	if (ep->mpa_attr.recv_marker_enabled)
		mpa->flags |= MPA_MARKERS;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
					FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

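/*
 * CPL_ACT_ESTABLISH: the active-side TCP connection is up. Move the ep
 * from its atid to the hardware tid, record the negotiated sequence
 * numbers and window scale, then start MPA negotiation (flowc followed
 * by the MPA request).
 */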
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;
	int ret;

	ep = lookup_atid(t, atid);

	pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
		 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family);
	insert_ep_tid(ep);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
	ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);

	set_emss(ep, tcp_opt);

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	ret = send_flowc(ep);
	if (ret)
		goto err;
	if (ep->retry_with_mpa_v1)
		ret = send_mpa_req(ep, skb, 1);
	else
		ret = send_mpa_req(ep, skb, mpa_rev);
	if (ret)
		goto err;
	mutex_unlock(&ep->com.mutex);
	return 0;
err:
	mutex_unlock(&ep->com.mutex);
	connect_reply_upcall(ep, -ENOMEM);
	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}

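/*
 * iWARP CM upcalls. Each helper fills in an iw_cm_event and delivers it
 * through the cm_id's event_handler; the *_UPCALL bits in the ep history
 * record which events have been raised.
 */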
static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		pr_debug("close complete delivered ep %p cm_id %p tid %u\n",
			 ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		pr_debug("peer close delivered ep %p cm_id %p tid %u\n",
			 ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep,
			 ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u status %d\n",
		 ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.ord = cur_max_read_depth(ep->com.dev);
			event.ird = cur_max_read_depth(ep->com.dev);
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	pr_debug("ep %p tid %u status %d\n", ep,
		 ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0)
		deref_cm_id(&ep->com);
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = cur_max_read_depth(ep->com.dev);
		event.ird = cur_max_read_depth(ep->com.dev);
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ord;
	event.ord = ep->ird;
	if (ep->com.cm_id) {
		pr_debug("ep %p tid %u\n", ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}

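/*
 * Return RX credits to the hardware with a CPL_RX_DATA_ACK. Any part of
 * the receive window that could not be expressed in opt0's RCV_BUFSIZ
 * field at connection setup is folded into the first credit update.
 */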
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct sk_buff *skb;
	u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
	u32 credit_dack;

	pr_debug("ep %p tid %u credits %u\n",
		 ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;

	credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F |
		      RX_DACK_MODE_V(dack_mode);

	cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
			    credit_dack);

	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}

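/*
 * When set, IRD/ORD negotiation shrinks the local values to match what
 * the peer responded with (where the adapter allows it) instead of
 * failing the connection with a TERM on any mismatch.
 */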
#define RELAXED_IRD_NEGOTIATION 1

/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
cc18b939 1453static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
cfdda9d7
SW
1454{
1455 struct mpa_message *mpa;
d2fe99e8 1456 struct mpa_v2_conn_params *mpa_v2_params;
cfdda9d7 1457 u16 plen;
d2fe99e8
KS
1458 u16 resp_ird, resp_ord;
1459 u8 rtr_mismatch = 0, insuff_ird = 0;
cfdda9d7
SW
1460 struct c4iw_qp_attributes attrs;
1461 enum c4iw_qp_attr_mask mask;
1462 int err;
cc18b939 1463 int disconnect = 0;
cfdda9d7 1464
548ddb19 1465 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
cfdda9d7 1466
cfdda9d7
SW
1467 /*
1468 * If we get more than the supported amount of private data
1469 * then we must fail this connection.
1470 */
1471 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
1472 err = -EINVAL;
da1cecdf 1473 goto err_stop_timer;
cfdda9d7
SW
1474 }
1475
1476 /*
1477 * copy the new data into our accumulation buffer.
1478 */
1479 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1480 skb->len);
1481 ep->mpa_pkt_len += skb->len;
1482
1483 /*
1484 * if we don't even have the mpa message, then bail.
1485 */
1486 if (ep->mpa_pkt_len < sizeof(*mpa))
cc18b939 1487 return 0;
cfdda9d7
SW
1488 mpa = (struct mpa_message *) ep->mpa_pkt;
1489
1490 /* Validate MPA header. */
d2fe99e8 1491 if (mpa->revision > mpa_rev) {
700456bd
JP
1492 pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
1493 __func__, mpa_rev, mpa->revision);
cfdda9d7 1494 err = -EPROTO;
da1cecdf 1495 goto err_stop_timer;
cfdda9d7
SW
1496 }
1497 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
1498 err = -EPROTO;
da1cecdf 1499 goto err_stop_timer;
cfdda9d7
SW
1500 }
1501
1502 plen = ntohs(mpa->private_data_size);
1503
1504 /*
1505 * Fail if there's too much private data.
1506 */
1507 if (plen > MPA_MAX_PRIVATE_DATA) {
1508 err = -EPROTO;
da1cecdf 1509 goto err_stop_timer;
cfdda9d7
SW
1510 }
1511
1512 /*
1513 * If plen does not account for pkt size
1514 */
1515 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1516 err = -EPROTO;
da1cecdf 1517 goto err_stop_timer;
cfdda9d7
SW
1518 }
1519
1520 ep->plen = (u8) plen;
1521
1522 /*
1523 * If we don't have all the pdata yet, then bail.
1524 * We'll continue process when more data arrives.
1525 */
1526 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
cc18b939 1527 return 0;
cfdda9d7
SW
1528
1529 if (mpa->flags & MPA_REJECT) {
1530 err = -ECONNREFUSED;
da1cecdf 1531 goto err_stop_timer;
cfdda9d7
SW
1532 }
1533
da1cecdf
H
1534 /*
1535 * Stop mpa timer. If it expired, then
1536 * we ignore the MPA reply. process_timeout()
1537 * will abort the connection.
1538 */
1539 if (stop_ep_timer(ep))
1540 return 0;
1541
cfdda9d7
SW
1542 /*
1543 * If we get here we have accumulated the entire mpa
1544 * start reply message including private data. And
1545 * the MPA header is valid.
1546 */
c529fb50 1547 __state_set(&ep->com, FPDU_MODE);
cfdda9d7 1548 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
cfdda9d7 1549 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
d2fe99e8
KS
1550 ep->mpa_attr.version = mpa->revision;
1551 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1552
1553 if (mpa->revision == 2) {
1554 ep->mpa_attr.enhanced_rdma_conn =
1555 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1556 if (ep->mpa_attr.enhanced_rdma_conn) {
1557 mpa_v2_params = (struct mpa_v2_conn_params *)
1558 (ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			pr_debug("responder ird %u ord %u ep ird %u ord %u\n",
				 resp_ird, resp_ord, ep->ird, ep->ord);

			/*
			 * This is a double-check. Ideally, the checks below
			 * are not required, since the ird/ord negotiation
			 * has already been taken care of in c4iw_accept_cr().
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.lldi.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}
			if (insuff_ird) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
		 ep->mpa_attr.crc_enabled,
		 ep->mpa_attr.recv_marker_enabled,
		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
		 ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If the responder's RTR does not match that of the initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in the mpa attributes so that no RTR is
	 * generated when the QP moves to RTS state.
	 * A TERM message will be sent after the QP has moved to RTS state.
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If the responder's RTR requirement did not match what the initiator
	 * supports, generate a TERM message.
	 */
	if (rtr_mismatch) {
		pr_err("%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate a TERM if the initiator IRD is not sufficient for the
	 * responder-provided ORD. Currently we behave the same way even when
	 * the responder-provided IRD is insufficient for the initiator ORD.
	 */
	if (insuff_ird) {
		pr_err("%s: Insufficient IRD, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err_stop_timer:
	stop_ep_timer(ep);
err:
	disconnect = 2;
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}

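/*
 * Illustrative sketch (not part of the driver; kept compiled out): the
 * responder-side IRD/ORD reconciliation above reduces to the rule below.
 * The helper and its names are hypothetical; "relaxed" stands in for
 * RELAXED_IRD_NEGOTIATION and "max_qp" for the adapter's max_ordird_qp.
 */
#if 0
static int reconcile_ird_ord(u32 *ird, u32 *ord, u32 resp_ird, u32 resp_ord,
			     int relaxed, u32 max_qp)
{
	int insuff = 0;

	/* Our IRD must cover the peer's ORD; grow it only in relaxed mode. */
	if (*ird < resp_ord) {
		if (relaxed && resp_ord <= max_qp)
			*ird = resp_ord;
		else
			insuff = 1;
	} else if (*ird > resp_ord) {
		*ird = resp_ord;	/* never advertise more than needed */
	}
	/* Our ORD must not exceed the peer's IRD; shrink it if allowed. */
	if (*ord > resp_ird) {
		if (relaxed)
			*ord = resp_ird;
		else
			insuff = 1;
	}
	return insuff;	/* non-zero means a TERM will follow */
}
#endif
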
/*
 * process_mpa_request - process streaming mode MPA request
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
		goto err_stop_timer;

	pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;

	pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
		       __func__, mpa_rev, mpa->revision);
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
		goto err_stop_timer;

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA)
		goto err_stop_timer;

	/*
	 * Fail if plen does not account for the packet size.
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
		goto err_stop_timer;
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ird = min_t(u32, ep->ird,
					cur_max_read_depth(ep->com.dev));
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = min_t(u32, ep->ord,
					cur_max_read_depth(ep->com.dev));
			pr_debug("initiator ird %u ord %u\n",
				 ep->ird, ep->ord);
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
					    MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
						 MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
		 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
		 ep->mpa_attr.p2p_type);

	__state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
	if (ep->parent_ep->com.state != DEAD) {
		if (connect_request_upcall(ep))
			goto err_unlock_parent;
	} else {
		goto err_unlock_parent;
	}
	mutex_unlock(&ep->parent_ep->com.mutex);
	return 0;

err_unlock_parent:
	mutex_unlock(&ep->parent_ep->com.mutex);
	goto err_out;
err_stop_timer:
	(void)stop_ep_timer(ep);
err_out:
	return 2;
}
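
/*
 * Illustrative sketch (not part of the driver; kept compiled out): the
 * length checks in process_mpa_request() reduce to the classifier below.
 * "buf_len" is how much of the stream has accumulated so far and "hdr_len"
 * is sizeof(struct mpa_message); all names here are hypothetical.
 */
#if 0
enum mpa_parse { MPA_NEED_MORE, MPA_COMPLETE, MPA_BAD };

static enum mpa_parse classify_mpa_pkt(size_t buf_len, size_t hdr_len,
				       u16 plen, u16 max_priv)
{
	if (buf_len < hdr_len)
		return MPA_NEED_MORE;	/* header not complete yet */
	if (plen > max_priv)
		return MPA_BAD;		/* too much private data */
	if (buf_len > hdr_len + plen)
		return MPA_BAD;		/* plen disagrees with pkt size */
	if (buf_len < hdr_len + plen)
		return MPA_NEED_MORE;	/* private data still arriving */
	return MPA_COMPLETE;
}
#endif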

static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	__u8 status = hdr->status;
	int disconnect = 0;

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;
	pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	mutex_lock(&ep->com.mutex);

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		update_rx_credits(ep, dlen);
		ep->rcv_seq += dlen;
		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		update_rx_credits(ep, dlen);
		ep->rcv_seq += dlen;
		disconnect = process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;

		update_rx_credits(ep, dlen);
		if (status)
			pr_err("%s Unexpected streaming data. qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}

static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx)
{
	enum chip_type adapter_type;

	adapter_type = ep->com.dev->rdev.lldi.adapter_type;

	/*
	 * If this TCB had a srq buffer cached, then we must complete
	 * it. For user mode, that means saving the srqidx in the
	 * user/kernel status page for this qp. For kernel mode, just
	 * synthesize the CQE now.
	 */
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T5 && srqidx) {
		if (ep->com.qp->ibqp.uobject)
			t4_set_wq_in_error(&ep->com.qp->wq, srqidx);
		else
			c4iw_flush_srqidx(ep->com.qp, srqidx);
	}
}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	u32 srqidx;
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);

	ep = get_ep_from_tid(dev, tid);
	if (!ep) {
		pr_warn("Abort rpl to freed endpoint\n");
		return 0;
	}

	if (ep->com.qp && ep->com.qp->srq) {
		srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(rpl->srqidx_status));
		complete_cached_srq_buffers(ep, srqidx ? srqidx : ep->srqe_idx);
	}

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release) {
		close_complete_upcall(ep, -ECONNRESET);
		release_ep_resources(ep);
	}
	c4iw_put_ep(&ep->com);
	return 0;
}

static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	u32 wscale;
	struct sockaddr_in *sin;
	int win;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = __skb_put_zero(skb, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
				     ep->l2t));
	sin = (struct sockaddr_in *)&ep->com.local_addr;
	req->le.lport = sin->sin_port;
	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
	sin = (struct sockaddr_in *)&ep->com.remote_addr;
	req->le.pport = sin->sin_port;
	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
	req->tcb.t_state_to_astid =
			htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
			      FW_OFLD_CONNECTION_WR_ASTID_V(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
			htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		      enable_tcp_timestamps,
		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
		(nocong ? NO_CONG_F : 0) |
		KEEP_ALIVE_F |
		DELACK_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(ep->l2t->idx) |
		TX_CHAN_V(ep->tx_chan) |
		SMAC_SEL_V(ep->smac_idx) |
		DSCP_V(ep->tos >> 2) |
		ULP_MODE_V(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ_V(win));
	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
		TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL_V(0) |
		CCTRL_ECN_V(enable_ecn) |
		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

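/*
 * Illustrative sketch (not part of the driver; kept compiled out): opt0
 * carries the receive window in 1KB units and the field is only
 * RCV_BUFSIZ_M wide, so the window is scaled down and clamped; the
 * remainder is granted later via RX_DATA_ACK credits.
 */
#if 0
static int opt0_window(int rcv_win_bytes)
{
	int win = rcv_win_bytes >> 10;	/* bytes -> 1KB units */

	if (win > RCV_BUFSIZ_M)		/* clamp to the opt0 field width */
		win = RCV_BUFSIZ_M;
	return win;
}
#endif
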
/*
 * Some of the error codes above implicitly indicate that there is no TID
 * allocated with the result of an ACT_OPEN.  We use this predicate to make
 * that explicit.
 */
static inline int act_open_has_tid(int status)
{
	return (status != CPL_ERR_TCAM_PARITY &&
		status != CPL_ERR_TCAM_MISS &&
		status != CPL_ERR_TCAM_FULL &&
		status != CPL_ERR_CONN_EXIST_SYNRECV &&
		status != CPL_ERR_CONN_EXIST);
}

static char *neg_adv_str(unsigned int status)
{
	switch (status) {
	case CPL_ERR_RTX_NEG_ADVICE:
		return "Retransmit timeout";
	case CPL_ERR_PERSIST_NEG_ADVICE:
		return "Persist timeout";
	case CPL_ERR_KEEPALV_NEG_ADVICE:
		return "Keepalive timeout";
	default:
		return "Unknown";
	}
}

static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
	ep->snd_win = snd_win;
	ep->rcv_win = rcv_win;
	pr_debug("snd_win %d rcv_win %d\n",
		 ep->snd_win, ep->rcv_win);
}

#define ACT_OPEN_RETRY_COUNT 2

static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
{
	struct neighbour *n;
	int err, step;
	struct net_device *pdev;

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  pdev, 1))
					break;
			}
		else
			pdev = NULL;

		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
		if (!ep->l2t) {
			dev_put(pdev);
			goto out;
		}
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
		dev_put(pdev);
	} else {
		pdev = get_real_dev(n->dev);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));

		if (clear_mpa_v1) {
			ep->retry_with_mpa_v1 = 0;
			ep->tried_with_mpa_v1 = 0;
		}
	}
	err = 0;
out:
	rcu_read_unlock();

	neigh_release(n);

	return err;
}

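/*
 * Illustrative sketch (not part of the driver; kept compiled out):
 * import_ep() spreads connections across the per-channel queue sets. With
 * nqueues TX (or RX) queues shared by nchan channels, a port's first queue
 * index is port_idx * (nqueues / nchan). The helper name is hypothetical.
 */
#if 0
static unsigned int first_queue_for_port(unsigned int port_idx,
					 unsigned int nqueues,
					 unsigned int nchan)
{
	unsigned int step = nqueues / nchan;	/* queues per channel */

	return port_idx * step;
}
#endif
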
static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	int size = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_remote_addr;
	int iptype;
	__u8 *ra;

	pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id);
	c4iw_init_wr_wait(ep->com.wr_waitp);

	/* When the MPA revision differs between nodes, the node with
	 * MPA_rev=2 retries the connection with MPA_rev 1 for the same EP
	 * through c4iw_reconnect(), where the same EP is assigned a new tid
	 * for further connection establishment. Since we reuse the same EP
	 * pointer for the reconnect, some skbs were consumed during the
	 * previous c4iw_connect(), which leaves the EP with too few skbs for
	 * a further c4iw_reconnect(), eventually causing a crash due to an
	 * empty skb_list() during peer_abort(). Replenish the skbs that were
	 * already consumed.
	 */
	size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
	if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
		err = -ENOMEM;
		goto fail1;
	}

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);

	/* find a route */
	if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
		ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
					  laddr->sin_addr.s_addr,
					  raddr->sin_addr.s_addr,
					  laddr->sin_port,
					  raddr->sin_port, ep->com.cm_id->tos);
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;
	} else {
		ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
					   get_real_dev,
					   laddr6->sin6_addr.s6_addr,
					   raddr6->sin6_addr.s6_addr,
					   laddr6->sin6_port,
					   raddr6->sin6_port,
					   ep->com.cm_id->tos,
					   raddr6->sin6_scope_id);
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;
	}
	if (!ep->dst) {
		pr_err("%s - cannot find route\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
			ep->com.dev->rdev.lldi.adapter_type,
			ep->com.cm_id->tos);
	if (err) {
		pr_err("%s - cannot alloc l2e\n", __func__);
		goto fail4;
	}

	pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
		 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
		 ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = ep->com.cm_id->tos;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * Remember to notify the upper layer. We got here because the upper
	 * layer is unaware that this is a re-connect attempt, so it is still
	 * waiting for the response to the first connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
fail1:
	c4iw_put_ep(&ep->com);
out:
	return err;
}

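/*
 * Illustrative sketch (not part of the driver; kept compiled out): the
 * reconnect path only tops the endpoint's skb list back up to the target
 * depth rather than allocating a full set, since some entries may survive
 * the first attempt. The helper name is hypothetical.
 */
#if 0
static int replenish_ep_skbs(struct sk_buff_head *list, int want)
{
	int need = want - skb_queue_len(list);	/* only the shortfall */

	return need > 0 ? alloc_ep_skb_list(list, need) : 0;
}
#endif
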
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = TID_TID_G(AOPEN_ATID_G(
				      ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
	struct sockaddr_in *la;
	struct sockaddr_in *ra;
	struct sockaddr_in6 *la6;
	struct sockaddr_in6 *ra6;
	int ret = 0;

	ep = lookup_atid(t, atid);
	la = (struct sockaddr_in *)&ep->com.local_addr;
	ra = (struct sockaddr_in *)&ep->com.remote_addr;
	la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;

	pr_debug("ep %p atid %u status %u errno %d\n", ep, atid,
		 status, status2errno(status));

	if (cxgb_is_neg_adv(status)) {
		pr_debug("Connection problems for atid %u status %u (%s)\n",
			 atid, status, neg_adv_str(status));
		ep->stats.connect_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		return 0;
	}

	set_bit(ACT_OPEN_RPL, &ep->com.history);

	/*
	 * Log interesting failures.
	 */
	switch (status) {
	case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
	case CPL_ERR_TCAM_FULL:
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.tcam_full++;
		mutex_unlock(&dev->rdev.stats.lock);
		if (ep->com.local_addr.ss_family == AF_INET &&
		    dev->rdev.lldi.enable_fw_ofld_conn) {
			ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
						   ntohl(rpl->atid_status))));
			if (ret)
				goto fail;
			return 0;
		}
		break;
	case CPL_ERR_CONN_EXIST:
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			set_bit(ACT_RETRY_INUSE, &ep->com.history);
			if (ep->com.remote_addr.ss_family == AF_INET6) {
				struct sockaddr_in6 *sin6 =
						(struct sockaddr_in6 *)
						&ep->com.local_addr;
				cxgb4_clip_release(
						ep->com.dev->rdev.lldi.ports[0],
						(const u32 *)
						&sin6->sin6_addr.s6_addr, 1);
			}
			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
				      atid);
			cxgb4_free_atid(t, atid);
			dst_release(ep->dst);
			cxgb4_l2t_release(ep->l2t);
			c4iw_reconnect(ep);
			return 0;
		}
		break;
	default:
		if (ep->com.local_addr.ss_family == AF_INET) {
			pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
				atid, status, status2errno(status),
				&la->sin_addr.s_addr, ntohs(la->sin_port),
				&ra->sin_addr.s_addr, ntohs(ra->sin_port));
		} else {
			pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
				atid, status, status2errno(status),
				la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
				ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
		}
		break;
	}

fail:
	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl),
				 ep->com.local_addr.ss_family);

	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}

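/*
 * Illustrative sketch (not part of the driver; kept compiled out):
 * act_open_rpl() retries a failed active open at most ACT_OPEN_RETRY_COUNT
 * times when the four-tuple is still held by a stale connection. The helper
 * below captures that bounded-retry decision; its name is hypothetical.
 */
#if 0
static int should_retry_act_open(int status, unsigned int *retry_count)
{
	return status == CPL_ERR_CONN_EXIST &&
	       (*retry_count)++ < ACT_OPEN_RETRY_COUNT;
}
#endif
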
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);

	if (!ep) {
		pr_warn("%s stid %d lookup failure!\n", __func__, stid);
		goto out;
	}
	pr_debug("ep %p status %d error %d\n", ep,
		 rpl->status, status2errno(rpl->status));
	c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
	c4iw_put_ep(&ep->com);
out:
	return 0;
}

static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);

	if (!ep) {
		pr_warn("%s stid %d lookup failure!\n", __func__, stid);
		goto out;
	}
	pr_debug("ep %p\n", ep);
	c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
	c4iw_put_ep(&ep->com);
out:
	return 0;
}

static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
		     struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	u32 wscale;
	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
	int win;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);

	skb_get(skb);
	rpl = cplhdr(skb);
	if (!is_t4(adapter_type)) {
		skb_trim(skb, roundup(sizeof(*rpl5), 16));
		rpl5 = (void *)rpl;
		INIT_TP_WR(rpl5, ep->hwtid);
	} else {
		skb_trim(skb, sizeof(*rpl));
		INIT_TP_WR(rpl, ep->hwtid);
	}
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));

	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		      enable_tcp_timestamps && req->tcpopt.tstamp,
		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
			tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
				IP_HDR_LEN_G(hlen);
		else
			tcph = (const void *)(req + 1) +
				T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN_V(1);
	}
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		u32 isn = (prandom_u32() & ~7UL) - 1;
		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
		rpl5 = (void *)rpl;
		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
		if (peer2peer)
			isn += 4;
		rpl5->iss = cpu_to_be32(isn);
		pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
	}

	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);

	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

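/*
 * Illustrative sketch (not part of the driver; kept compiled out):
 * accept_cr() only turns on hardware ECN when the incoming SYN negotiated
 * it, which per RFC 3168 means both ECE and CWR are set in the SYN's TCP
 * flags. The helper name is hypothetical.
 */
#if 0
static bool syn_wants_ecn(const struct tcphdr *tcph)
{
	return tcph->ece && tcph->cwr;	/* ECN-setup SYN */
}
#endif
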
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
	pr_debug("c4iw_dev %p tid %u\n", dev, hwtid);
	skb_trim(skb, sizeof(struct cpl_tid_release));
	release_tid(&dev->rdev, hwtid, skb);
}

static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep = NULL, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	struct sockaddr_in6 *sin6;
	int err;
	u16 peer_mss = ntohs(req->tcpopt.mss);
	int iptype;
	unsigned short hdrs;
	u8 tos;

	parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
	if (!parent_ep) {
		pr_err("%s connect request on invalid stid %d\n",
		       __func__, stid);
		goto reject;
	}

	if (state_read(&parent_ep->com) != LISTEN) {
		pr_err("%s - listening ep not in LISTEN\n", __func__);
		goto reject;
	}

	if (parent_ep->com.cm_id->tos_set)
		tos = parent_ep->com.cm_id->tos;
	else
		tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));

	cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
			&iptype, local_ip, peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4) {
		pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n",
			 parent_ep, hwtid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
				      *(__be32 *)local_ip, *(__be32 *)peer_ip,
				      local_port, peer_port, tos);
	} else {
		pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n",
			 parent_ep, hwtid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
				       local_ip, peer_ip, local_port,
				       peer_port, tos,
				       ((struct sockaddr_in6 *)
					&parent_ep->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n", __func__);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		pr_err("%s - failed to allocate ep entry!\n", __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
			parent_ep->com.dev->rdev.lldi.adapter_type, tos);
	if (err) {
		pr_err("%s - failed to allocate l2t entry!\n", __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

	hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
	       sizeof(struct tcphdr) +
	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
		child_ep->mtu = peer_mss + hdrs;

	skb_queue_head_init(&child_ep->com.ep_skb_list);
	if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
		goto fail;

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;

	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
			&child_ep->com.local_addr;

		sin->sin_family = AF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.local_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = ((struct sockaddr_in *)
				 &parent_ep->com.local_addr)->sin_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = ((struct sockaddr_in6 *)
				   &parent_ep->com.local_addr)->sin6_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}

	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = tos;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	pr_debug("tx_chan %u smac_idx %u rss_qid %u\n",
		 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	timer_setup(&child_ep->timer, ep_timeout, 0);
	cxgb4_insert_tid(t, child_ep, hwtid,
			 child_ep->com.local_addr.ss_family);
	insert_ep_tid(child_ep);
	if (accept_cr(child_ep, skb, req)) {
		c4iw_put_ep(&parent_ep->com);
		release_ep_resources(child_ep);
	} else {
		set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
	}
	if (iptype == 6) {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	goto out;
fail:
	c4iw_put_ep(&child_ep->com);
reject:
	reject_cr(dev, hwtid, skb);
out:
	if (parent_ep)
		c4iw_put_ep(&parent_ep->com);
	return 0;
}

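/*
 * Illustrative sketch (not part of the driver; kept compiled out):
 * pass_accept_req() clamps the path MTU so the peer's advertised MSS plus
 * its protocol headers is never exceeded; the 12 bytes account for the TCP
 * timestamp option. The helper name is hypothetical.
 */
#if 0
static unsigned int clamp_mtu(unsigned int mtu, u16 peer_mss,
			      int ipv4, int tstamps)
{
	unsigned int hdrs = (ipv4 ? sizeof(struct iphdr) :
				    sizeof(struct ipv6hdr)) +
			    sizeof(struct tcphdr) + (tstamps ? 12 : 0);

	if (peer_mss && mtu > peer_mss + hdrs)
		mtu = peer_mss + hdrs;
	return mtu;
}
#endif
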
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	int ret;
	u16 tcp_opt = ntohs(req->tcp_opt);

	ep = get_ep_from_tid(dev, tid);
	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
	ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);

	pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid, tcp_opt);

	set_emss(ep, tcp_opt);

	dst_confirm(ep->dst);
	mutex_lock(&ep->com.mutex);
	ep->com.state = MPA_REQ_WAIT;
	start_ep_timer(ep);
	set_bit(PASS_ESTAB, &ep->com.history);
	ret = send_flowc(ep);
	mutex_unlock(&ep->com.mutex);
	if (ret)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);

	return 0;
}

static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	unsigned int tid = GET_TID(hdr);
	int ret;

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	dst_confirm(ep->dst);

	set_bit(PEER_CLOSE, &ep->com.history);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -ECONNRESET) {
			peer_close_upcall(ep);
			disconnect = 1;
		}
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
	return 0;
}

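/*
 * Illustrative sketch (not part of the driver; kept compiled out): the
 * passive half-close handled by peer_close() walks a CLOSING -> MORIBUND ->
 * DEAD ladder. The helper below is a hypothetical rendering of just that
 * ladder; the real handler also drives upcalls and QP transitions.
 */
#if 0
static int next_state_on_peer_fin(int state)
{
	switch (state) {
	case FPDU_MODE:
		return CLOSING;		/* start the local half-close */
	case CLOSING:
		return MORIBUND;	/* both sides have sent FIN */
	case MORIBUND:
		return DEAD;		/* close handshake finished */
	default:
		return state;		/* no transition */
	}
}
#endif
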
static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep)
{
	complete_cached_srq_buffers(ep, ep->srqe_idx);
	if (ep->com.cm_id && ep->com.qp) {
		struct c4iw_qp_attributes attrs;

		attrs.next_state = C4IW_QP_STATE_ERROR;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	}
	peer_abort_upcall(ep);
	release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
}

static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss6 *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned int tid = GET_TID(req);
	u8 status;
	u32 srqidx;

	u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;

	status = ABORT_RSS_STATUS_G(be32_to_cpu(req->srqidx_status));

	if (cxgb_is_neg_adv(status)) {
		pr_debug("Negative advice on abort - tid %u status %d (%s)\n",
			 ep->hwtid, status, neg_adv_str(status));
		ep->stats.abort_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		goto deref_ep;
	}

	pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
		 ep->com.state);
	set_bit(PEER_ABORT, &ep->com.history);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if the com state is just
	 * MPA_REQ_SENT.
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		c4iw_put_ep(&ep->parent_ep->com);
		break;
	case MPA_REQ_WAIT:
		(void)stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		(void)stop_ep_timer(ep);
		if (status != CPL_ERR_CONN_RESET || mpa_rev == 1 ||
		    (mpa_rev == 2 && ep->tried_with_mpa_v1))
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * We just don't send the notification upwards
			 * because we want to retry with mpa_v1 without the
			 * upper layers even knowing it.
			 *
			 * Do some housekeeping so as to re-initiate the
			 * connection.
			 */
			pr_info("%s: mpa_rev=%d. Retrying with mpav1\n",
				__func__, mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case MPA_REP_SENT:
		break;
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.qp && ep->com.qp->srq) {
			srqidx = ABORT_RSS_SRQIDX_G(
					be32_to_cpu(req->srqidx_status));
			if (srqidx) {
				complete_cached_srq_buffers(ep, srqidx);
			} else {
				/* Hold ep ref until finish_peer_abort() */
				c4iw_get_ep(&ep->com);
				__state_set(&ep->com, ABORTING);
				set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags);
				read_tcb(ep);
				break;
			}
		}

		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
					     ep->com.qp,
					     C4IW_QP_ATTR_NEXT_STATE,
					     &attrs, 1);
			if (ret)
				pr_err("%s - qp <- error failed!\n", __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		pr_warn("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		goto deref_ep;
	default:
		WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
	if (WARN_ON(!rpl_skb)) {
		release = 1;
		goto out;
	}

	cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx);

	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	else if (ep->retry_with_mpa_v1) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;
			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
				 ep->com.local_addr.ss_family);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_reconnect(ep);
	}

deref_ep:
	c4iw_put_ep(&ep->com);
	/* Dereferencing ep, referenced in peer_abort_intr() */
	c4iw_put_ep(&ep->com);
	return 0;
}

static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	set_bit(CLOSE_CON_RPL, &ep->com.history);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
	return 0;
}

static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	unsigned int tid = GET_TID(rpl);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;

	ep = get_ep_from_tid(dev, tid);

	if (ep) {
		if (ep->com.qp) {
			pr_warn("TERM received tid %u qpid %u\n", tid,
				ep->com.qp->wq.sq.qid);
			attrs.next_state = C4IW_QP_STATE_TERMINATE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}

		c4iw_put_ep(&ep->com);
	} else
		pr_warn("TERM received tid %u no ep/qp\n", tid);

	return 0;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;
	pr_debug("ep %p tid %u credits %u\n",
		 ep, ep->hwtid, credits);
	if (credits == 0) {
		pr_debug("0 credit ack ep %p tid %u state %u\n",
			 ep, ep->hwtid, state_read(&ep->com));
		goto out;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
			 ep, ep->hwtid, state_read(&ep->com),
			 ep->mpa_attr.initiator ? 1 : 0);
		mutex_lock(&ep->com.mutex);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
		if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
			stop_ep_timer(ep);
		mutex_unlock(&ep->com.mutex);
	}
out:
	c4iw_put_ep(&ep->com);
	return 0;
}

int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int abort;
	struct c4iw_ep *ep = to_ep(cm_id);

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state != MPA_REQ_RCVD) {
		mutex_unlock(&ep->com.mutex);
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	set_bit(ULP_REJECT, &ep->com.history);
	if (mpa_rev == 0)
		abort = 1;
	else
		abort = send_mpa_reject(ep, pdata, pdata_len);
	mutex_unlock(&ep->com.mutex);

	stop_ep_timer(ep);
	c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}

int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
	int abort = 0;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state != MPA_REQ_RCVD) {
		err = -ECONNRESET;
		goto err_out;
	}

	if (!qp) {
		err = -EINVAL;
		goto err_out;
	}

	set_bit(ULP_ACCEPT, &ep->com.history);
	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
		err = -EINVAL;
		goto err_abort;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		if (conn_param->ord > ep->ird) {
			if (RELAXED_IRD_NEGOTIATION) {
				conn_param->ord = ep->ird;
			} else {
				ep->ird = conn_param->ird;
				ep->ord = conn_param->ord;
				send_mpa_reject(ep, conn_param->private_data,
						conn_param->private_data_len);
				err = -ENOMEM;
				goto err_abort;
			}
		}
		if (conn_param->ird < ep->ord) {
			if (RELAXED_IRD_NEGOTIATION &&
			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
				conn_param->ird = ep->ord;
			} else {
				err = -ENOMEM;
				goto err_abort;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version == 1) {
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;
	} else {
		if (peer2peer &&
		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
			ep->ird = 1;
	}

	pr_debug("ird %d ord %d\n", ep->ird, ep->ord);

	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.qp = qp;
	ref_qp(ep);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err_deref_cm_id;

	set_bit(STOP_MPA_TIMER, &ep->com.flags);
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err_deref_cm_id;

	__state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return 0;
err_deref_cm_id:
	deref_cm_id(&ep->com);
err_abort:
	abort = 1;
err_out:
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return err;
}

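/*
 * Illustrative sketch (not part of the driver; kept compiled out): with
 * peer2peer enabled the accepting side must be able to absorb the RTR
 * message, so c4iw_accept_cr() floors IRD at 1; for MPA v2 this only
 * matters when the negotiated RTR is a zero-length read. The helper and
 * its names are hypothetical.
 */
#if 0
static u32 ird_floor_for_p2p(u32 ird, int peer2peer_on, int read_rtr)
{
	if (peer2peer_on && read_rtr && ird == 0)
		ird = 1;	/* reserve one inbound read for the RTR */
	return ird;
}
#endif
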
static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in_device *ind;
	int found = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;

	ind = in_dev_get(dev->rdev.lldi.ports[0]);
	if (!ind)
		return -EADDRNOTAVAIL;
	for_primary_ifa(ind) {
		laddr->sin_addr.s_addr = ifa->ifa_address;
		raddr->sin_addr.s_addr = ifa->ifa_address;
		found = 1;
		break;
	}
	endfor_ifa(ind);
	in_dev_put(ind);
	return found ? 0 : -EADDRNOTAVAIL;
}

static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
		      unsigned char banned_flags)
{
	struct inet6_dev *idev;
	int err = -EADDRNOTAVAIL;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev != NULL) {
		struct inet6_ifaddr *ifp;

		read_lock_bh(&idev->lock);
		list_for_each_entry(ifp, &idev->addr_list, if_list) {
			if (ifp->scope == IFA_LINK &&
			    !(ifp->flags & banned_flags)) {
				memcpy(addr, &ifp->addr, 16);
				err = 0;
				break;
			}
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return err;
}

static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in6_addr uninitialized_var(addr);
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;

	if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
		memcpy(la6->sin6_addr.s6_addr, &addr, 16);
		memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
		return 0;
	}
	return -EADDRNOTAVAIL;
}

int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	int err = 0;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	__u8 *ra;
	int iptype;

	if ((conn_param->ord > cur_max_read_depth(dev)) ||
	    (conn_param->ird > cur_max_read_depth(dev))) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		pr_err("%s - cannot alloc ep\n", __func__);
		err = -ENOMEM;
		goto out;
	}

	skb_queue_head_init(&ep->com.ep_skb_list);
	if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
		err = -ENOMEM;
		goto fail1;
	}

	timer_setup(&ep->timer, ep_timeout, 0);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	cm_id->provider_data = ep;
	ep->com.dev = dev;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	if (!ep->com.qp) {
		pr_warn("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
		err = -EINVAL;
		goto fail2;
	}
	ref_qp(ep);
	pr_debug("qpn 0x%x qp %p cm_id %p\n", conn_param->qpn,
		 ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->atid_idr, ep, ep->atid);

	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
	       sizeof(ep->com.remote_addr));

	laddr = (struct sockaddr_in *)&ep->com.local_addr;
	raddr = (struct sockaddr_in *)&ep->com.remote_addr;
	laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	raddr6 = (struct sockaddr_in6 *)&ep->com.remote_addr;

	if (cm_id->m_remote_addr.ss_family == AF_INET) {
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
			err = pick_local_ipaddrs(dev, cm_id);
			if (err)
				goto fail2;
		}

		/* find a route */
		pr_debug("saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
			 &laddr->sin_addr, ntohs(laddr->sin_port),
			 ra, ntohs(raddr->sin_port));
		ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
					  laddr->sin_addr.s_addr,
					  raddr->sin_addr.s_addr,
					  laddr->sin_port,
					  raddr->sin_port, cm_id->tos);
	} else {
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
			err = pick_local_ip6addrs(dev, cm_id);
			if (err)
				goto fail2;
		}

		/* find a route */
		pr_debug("saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
			 laddr6->sin6_addr.s6_addr,
			 ntohs(laddr6->sin6_port),
			 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
		ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
					   laddr6->sin6_addr.s6_addr,
					   raddr6->sin6_addr.s6_addr,
					   laddr6->sin6_port,
					   raddr6->sin6_port, cm_id->tos,
					   raddr6->sin6_scope_id);
	}
	if (!ep->dst) {
		pr_err("%s - cannot find route\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}

	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
			ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
	if (err) {
		pr_err("%s - cannot alloc l2e\n", __func__);
		goto fail4
cfdda9d7
SW
3415 }
3416
548ddb19
BP
3417 pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
3418 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
a9a42886 3419 ep->l2t->idx);
cfdda9d7
SW
3420
3421 state_set(&ep->com, CONNECTING);
ac8e4c69 3422 ep->tos = cm_id->tos;
cfdda9d7
SW
3423
3424 /* send connect request to rnic */
3425 err = send_connect(ep);
3426 if (!err)
3427 goto out;
3428
3429 cxgb4_l2t_release(ep->l2t);
4a740838 3430fail4:
9eccfe10 3431 dst_release(ep->dst);
4a740838 3432fail3:
793dad94 3433 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
cfdda9d7 3434 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
4a740838
H
3435fail2:
3436 skb_queue_purge(&ep->com.ep_skb_list);
9ca6f7cf 3437 deref_cm_id(&ep->com);
4a740838 3438fail1:
cfdda9d7
SW
3439 c4iw_put_ep(&ep->com);
3440out:
3441 return err;
3442}
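/*
 * Note on the error handling above, a standard kernel idiom shown here with
 * made-up get_/put_ helpers rather than driver functions: resources are
 * acquired top-down (ep, skb list, atid, route, l2t entry) and the failN
 * labels release them bottom-up, so failing at step N unwinds exactly the
 * steps that already succeeded. A minimal sketch of the same shape:
 */
static int get_a(void) { return 0; }	/* stand-ins for real acquisitions */
static int get_b(void) { return 0; }
static int get_c(void) { return 0; }
static void put_a(void) { }
static void put_b(void) { }

static int setup(void)
{
	int err;

	err = get_a();			/* step 1 */
	if (err)
		goto out;
	err = get_b();			/* step 2 */
	if (err)
		goto fail1;
	err = get_c();			/* step 3 */
	if (err)
		goto fail2;
	return 0;
fail2:
	put_b();			/* undo step 2 */
fail1:
	put_a();			/* undo step 1 */
out:
	return err;
}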
3443
830662f6
VP
3444static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3445{
3446 int err;
9eccfe10 3447 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
170003c8 3448 &ep->com.local_addr;
830662f6 3449
28de1f74
H
3450 if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
3451 err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
3452 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3453 if (err)
3454 return err;
3455 }
ef885dc6 3456 c4iw_init_wr_wait(ep->com.wr_waitp);
830662f6
VP
3457 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
3458 ep->stid, &sin6->sin6_addr,
3459 sin6->sin6_port,
3460 ep->com.dev->rdev.lldi.rxq_ids[0]);
3461 if (!err)
3462 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
ef885dc6 3463 ep->com.wr_waitp,
830662f6 3464 0, 0, __func__);
e6b11163
H
3465 else if (err > 0)
3466 err = net_xmit_errno(err);
28de1f74
H
3467 if (err) {
3468 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3469 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
830662f6
VP
3470 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
3471 err, ep->stid,
3472 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
28de1f74 3473 }
830662f6
VP
3474 return err;
3475}
3476
3477static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3478{
3479 int err;
9eccfe10 3480 struct sockaddr_in *sin = (struct sockaddr_in *)
170003c8 3481 &ep->com.local_addr;
830662f6
VP
3482
3483 if (dev->rdev.lldi.enable_fw_ofld_conn) {
3484 do {
3485 err = cxgb4_create_server_filter(
3486 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3487 sin->sin_addr.s_addr, sin->sin_port, 0,
3488 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
3489 if (err == -EBUSY) {
99718e59
H
3490 if (c4iw_fatal_error(&ep->com.dev->rdev)) {
3491 err = -EIO;
3492 break;
3493 }
830662f6
VP
3494 set_current_state(TASK_UNINTERRUPTIBLE);
3495 schedule_timeout(usecs_to_jiffies(100));
3496 }
3497 } while (err == -EBUSY);
3498 } else {
ef885dc6 3499 c4iw_init_wr_wait(ep->com.wr_waitp);
830662f6
VP
3500 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
3501 ep->stid, sin->sin_addr.s_addr, sin->sin_port,
3502 0, ep->com.dev->rdev.lldi.rxq_ids[0]);
3503 if (!err)
3504 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
ef885dc6 3505 ep->com.wr_waitp,
830662f6 3506 0, 0, __func__);
e6b11163
H
3507 else if (err > 0)
3508 err = net_xmit_errno(err);
830662f6
VP
3509 }
3510 if (err)
3511 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
3512 , err, ep->stid,
3513 &sin->sin_addr, ntohs(sin->sin_port));
3514 return err;
3515}
3516
cfdda9d7
SW
3517int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
3518{
3519 int err = 0;
3520 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
3521 struct c4iw_listen_ep *ep;
3522
cfdda9d7
SW
3523 might_sleep();
3524
3525 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3526 if (!ep) {
700456bd 3527 pr_err("%s - cannot alloc ep\n", __func__);
cfdda9d7
SW
3528 err = -ENOMEM;
3529 goto fail1;
3530 }
4a740838 3531 skb_queue_head_init(&ep->com.ep_skb_list);
548ddb19 3532 pr_debug("ep %p\n", ep);
cfdda9d7 3533 ep->com.cm_id = cm_id;
9ca6f7cf 3534 ref_cm_id(&ep->com);
cfdda9d7
SW
3535 ep->com.dev = dev;
3536 ep->backlog = backlog;
170003c8 3537 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
24d44a39 3538 sizeof(ep->com.local_addr));
cfdda9d7
SW
3539
3540 /*
3541 * Allocate a server TID.
3542 */
8c044690
KS
3543 if (dev->rdev.lldi.enable_fw_ofld_conn &&
3544 ep->com.local_addr.ss_family == AF_INET)
830662f6 3545 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
170003c8 3546 cm_id->m_local_addr.ss_family, ep);
1cab775c 3547 else
830662f6 3548 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
170003c8 3549 cm_id->m_local_addr.ss_family, ep);
1cab775c 3550
cfdda9d7 3551 if (ep->stid == -1) {
700456bd 3552 pr_err("%s - cannot alloc stid\n", __func__);
cfdda9d7
SW
3553 err = -ENOMEM;
3554 goto fail2;
3555 }
793dad94 3556 insert_handle(dev, &dev->stid_idr, ep, ep->stid);
9eccfe10 3557
cfdda9d7 3558 state_set(&ep->com, LISTEN);
830662f6
VP
3559 if (ep->com.local_addr.ss_family == AF_INET)
3560 err = create_server4(dev, ep);
3561 else
3562 err = create_server6(dev, ep);
cfdda9d7
SW
3563 if (!err) {
3564 cm_id->provider_data = ep;
3565 goto out;
3566 }
8b1bbf36 3567 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
830662f6
VP
3568 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3569 ep->com.local_addr.ss_family);
cfdda9d7 3570fail2:
9ca6f7cf 3571 deref_cm_id(&ep->com);
cfdda9d7
SW
3572 c4iw_put_ep(&ep->com);
3573fail1:
3574out:
3575 return err;
3576}
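/*
 * Note: with enable_fw_ofld_conn, IPv4 listeners are backed by a server
 * filter (see create_server4() above and rx_pkt() below) rather than a
 * real server entry, which is why that case allocates from the sftid
 * range while everything else uses a plain stid.
 */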
3577
3578int c4iw_destroy_listen(struct iw_cm_id *cm_id)
3579{
3580 int err;
3581 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
3582
548ddb19 3583 pr_debug("ep %p\n", ep);
cfdda9d7
SW
3584
3585 might_sleep();
3586 state_set(&ep->com, DEAD);
830662f6
VP
3587 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
3588 ep->com.local_addr.ss_family == AF_INET) {
1cab775c
VP
3589 err = cxgb4_remove_server_filter(
3590 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3591 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
3592 } else {
84cc6ac6 3593 struct sockaddr_in6 *sin6;
ef885dc6 3594 c4iw_init_wr_wait(ep->com.wr_waitp);
830662f6
VP
3595 err = cxgb4_remove_server(
3596 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3597 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
1cab775c
VP
3598 if (err)
3599 goto done;
ef885dc6 3600 err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
1cab775c 3601 0, 0, __func__);
170003c8 3602 sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
84cc6ac6
H
3603 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3604 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
1cab775c 3605 }
793dad94 3606 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
830662f6
VP
3607 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3608 ep->com.local_addr.ss_family);
cfdda9d7 3609done:
9ca6f7cf 3610 deref_cm_id(&ep->com);
cfdda9d7
SW
3611 c4iw_put_ep(&ep->com);
3612 return err;
3613}
3614
3615int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
3616{
3617 int ret = 0;
cfdda9d7
SW
3618 int close = 0;
3619 int fatal = 0;
3620 struct c4iw_rdev *rdev;
cfdda9d7 3621
2f5b48c3 3622 mutex_lock(&ep->com.mutex);
cfdda9d7 3623
548ddb19 3624 pr_debug("ep %p state %s, abrupt %d\n", ep,
a9a42886 3625 states[ep->com.state], abrupt);
cfdda9d7 3626
6e410d8f
H
3627 /*
3628 * Ref the ep here in case we have fatal errors causing the
3629 * ep to be released and freed.
3630 */
3631 c4iw_get_ep(&ep->com);
3632
cfdda9d7
SW
3633 rdev = &ep->com.dev->rdev;
3634 if (c4iw_fatal_error(rdev)) {
3635 fatal = 1;
be13b2df 3636 close_complete_upcall(ep, -EIO);
cfdda9d7
SW
3637 ep->com.state = DEAD;
3638 }
3639 switch (ep->com.state) {
3640 case MPA_REQ_WAIT:
3641 case MPA_REQ_SENT:
3642 case MPA_REQ_RCVD:
3643 case MPA_REP_SENT:
3644 case FPDU_MODE:
4a740838 3645 case CONNECTING:
cfdda9d7
SW
3646 close = 1;
3647 if (abrupt)
3648 ep->com.state = ABORTING;
3649 else {
3650 ep->com.state = CLOSING;
12eb5137
SW
3651
3652 /*
3653 * if we close before we see the fw4_ack() then we fix
3654 * up the timer state since we're reusing it.
3655 */
3656 if (ep->mpa_skb &&
3657 test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
3658 clear_bit(STOP_MPA_TIMER, &ep->com.flags);
3659 stop_ep_timer(ep);
3660 }
ca5a2202 3661 start_ep_timer(ep);
cfdda9d7
SW
3662 }
3663 set_bit(CLOSE_SENT, &ep->com.flags);
3664 break;
3665 case CLOSING:
3666 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
3667 close = 1;
3668 if (abrupt) {
b33bd0cb 3669 (void)stop_ep_timer(ep);
cfdda9d7
SW
3670 ep->com.state = ABORTING;
3671 } else
3672 ep->com.state = MORIBUND;
3673 }
3674 break;
3675 case MORIBUND:
3676 case ABORTING:
3677 case DEAD:
f48fca4d
BP
3678 pr_debug("ignoring disconnect ep %p state %u\n",
3679 ep, ep->com.state);
cfdda9d7
SW
3680 break;
3681 default:
ba97b749 3682 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
cfdda9d7
SW
3683 break;
3684 }
3685
cfdda9d7 3686 if (close) {
8da7e7a5 3687 if (abrupt) {
793dad94 3688 set_bit(EP_DISC_ABORT, &ep->com.history);
4a740838 3689 ret = send_abort(ep);
793dad94
VP
3690 } else {
3691 set_bit(EP_DISC_CLOSE, &ep->com.history);
4a740838 3692 ret = send_halfclose(ep);
793dad94 3693 }
88bc230d 3694 if (ret) {
9ca6f7cf 3695 set_bit(EP_DISC_FAIL, &ep->com.history);
88bc230d
H
3696 if (!abrupt) {
3697 stop_ep_timer(ep);
3698 close_complete_upcall(ep, -EIO);
3699 }
c00dcbaf
H
3700 if (ep->com.qp) {
3701 struct c4iw_qp_attributes attrs;
3702
3703 attrs.next_state = C4IW_QP_STATE_ERROR;
3704 ret = c4iw_modify_qp(ep->com.qp->rhp,
3705 ep->com.qp,
3706 C4IW_QP_ATTR_NEXT_STATE,
3707 &attrs, 1);
3708 if (ret)
700456bd 3709 pr_err("%s - qp <- error failed!\n",
c00dcbaf
H
3710 __func__);
3711 }
cfdda9d7 3712 fatal = 1;
88bc230d 3713 }
cfdda9d7 3714 }
8da7e7a5 3715 mutex_unlock(&ep->com.mutex);
6e410d8f 3716 c4iw_put_ep(&ep->com);
cfdda9d7
SW
3717 if (fatal)
3718 release_ep_resources(ep);
3719 return ret;
3720}
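/*
 * Summary of the paths above: a graceful disconnect (abrupt == 0) moves
 * the ep to CLOSING, arms the ep timer and sends a TCP half-close, while
 * an abrupt one moves it to ABORTING and sends an abort request. If the
 * send fails, the qp is forced to ERROR and the ep is torn down via
 * release_ep_resources(), just as on a fatal rdev error.
 */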
3721
1cab775c
VP
3722static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3723 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3724{
3725 struct c4iw_ep *ep;
793dad94 3726 int atid = be32_to_cpu(req->tid);
1cab775c 3727
ef5d6355
VP
3728 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
3729 (__force u32) req->tid);
1cab775c
VP
3730 if (!ep)
3731 return;
3732
3733 switch (req->retval) {
3734 case FW_ENOMEM:
793dad94
VP
3735 set_bit(ACT_RETRY_NOMEM, &ep->com.history);
3736 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3737 send_fw_act_open_req(ep, atid);
3738 return;
3739 }
9ae970e2 3740 /* fall through */
1cab775c 3741 case FW_EADDRINUSE:
793dad94
VP
3742 set_bit(ACT_RETRY_INUSE, &ep->com.history);
3743 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3744 send_fw_act_open_req(ep, atid);
3745 return;
3746 }
1cab775c
VP
3747 break;
3748 default:
3749 pr_info("%s unexpected ofld conn wr retval %d\n",
3750 __func__, req->retval);
3751 break;
3752 }
793dad94
VP
3753 pr_err("active ofld_connect_wr failure %d atid %d\n",
3754 req->retval, atid);
3755 mutex_lock(&dev->rdev.stats.lock);
3756 dev->rdev.stats.act_ofld_conn_fails++;
3757 mutex_unlock(&dev->rdev.stats.lock);
1cab775c 3758 connect_reply_upcall(ep, status2errno(req->retval));
793dad94 3759 state_set(&ep->com, DEAD);
84cc6ac6
H
3760 if (ep->com.remote_addr.ss_family == AF_INET6) {
3761 struct sockaddr_in6 *sin6 =
170003c8 3762 (struct sockaddr_in6 *)&ep->com.local_addr;
84cc6ac6
H
3763 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3764 (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3765 }
793dad94
VP
3766 remove_handle(dev, &dev->atid_idr, atid);
3767 cxgb4_free_atid(dev->rdev.lldi.tids, atid);
3768 dst_release(ep->dst);
3769 cxgb4_l2t_release(ep->l2t);
3770 c4iw_put_ep(&ep->com);
1cab775c
VP
3771}
3772
3773static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3774 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3775{
3776 struct sk_buff *rpl_skb;
3777 struct cpl_pass_accept_req *cpl;
3778 int ret;
3779
710a3110 3780 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
1cab775c 3781 if (req->retval) {
4d45b757 3782 pr_err("%s passive open failure %d\n", __func__, req->retval);
793dad94
VP
3783 mutex_lock(&dev->rdev.stats.lock);
3784 dev->rdev.stats.pas_ofld_conn_fails++;
3785 mutex_unlock(&dev->rdev.stats.lock);
1cab775c
VP
3786 kfree_skb(rpl_skb);
3787 } else {
3788 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
3789 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
ef5d6355
VP
3790 (__force u32) htonl(
3791 (__force u32) req->tid)));
1cab775c
VP
3792 ret = pass_accept_req(dev, rpl_skb);
3793 if (!ret)
3794 kfree_skb(rpl_skb);
3795 }
3796 return;
3797}
3798
11a27e21
RR
3799static inline u64 t4_tcb_get_field64(__be64 *tcb, u16 word)
3800{
3801 u64 tlo = be64_to_cpu(tcb[((31 - word) / 2)]);
3802 u64 thi = be64_to_cpu(tcb[((31 - word) / 2) - 1]);
3803 u64 t;
3804 u32 shift = 32;
3805
3806 t = (thi << shift) | (tlo >> shift);
3807
3808 return t;
3809}
3810
3811static inline u32 t4_tcb_get_field32(__be64 *tcb, u16 word, u32 mask, u32 shift)
3812{
3813 u32 v;
3814 u64 t = be64_to_cpu(tcb[(31 - word) / 2]);
3815
3816 if (word & 0x1)
3817 shift += 32;
3818 v = (t >> shift) & mask;
3819 return v;
3820}
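/*
 * Standalone user-space demo of the extraction done by the two helpers
 * above, not driver code: the hardware returns the TCB as 32 big-endian
 * 32-bit words packed into 16 64-bit slots with word 31 first, so word W
 * lives in slot (31 - W) / 2 and odd-numbered words occupy the upper 32
 * bits of their slot. The be64_to_cpu() step is elided here.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_get_field32(const uint64_t *tcb, uint16_t word,
				 uint32_t mask, uint32_t shift)
{
	uint64_t t = tcb[(31 - word) / 2];

	if (word & 0x1)			/* odd word: upper half of the slot */
		shift += 32;
	return (t >> shift) & mask;
}

int main(void)
{
	uint64_t tcb[16] = { 0 };

	tcb[(31 - 16) / 2] = 0xabcd;	/* plant a value in (even) word 16 */
	printf("word 16 = 0x%x\n",	/* prints 0xabcd */
	       (unsigned)demo_get_field32(tcb, 16, 0xffff, 0));
	return 0;
}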
3821
3822static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
3823{
3824 struct cpl_get_tcb_rpl *rpl = cplhdr(skb);
3825 __be64 *tcb = (__be64 *)(rpl + 1);
3826 unsigned int tid = GET_TID(rpl);
3827 struct c4iw_ep *ep;
3828 u64 t_flags_64;
3829 u32 rx_pdu_out;
3830
3831 ep = get_ep_from_tid(dev, tid);
3832 if (!ep)
3833 return 0;
3834 /* Examine the TF_RX_PDU_OUT (bit 49 of the t_flags) in order to
3835 * determine if there's an rx PDU feedback event pending.
3836 *
3837 * If that bit is set, it means we'll need to re-read the TCB's
3838 * rq_start value. The final value is the one present in a TCB
3839 * with the TF_RX_PDU_OUT bit cleared.
3840 */
3841
3842 t_flags_64 = t4_tcb_get_field64(tcb, TCB_T_FLAGS_W);
3843 rx_pdu_out = (t_flags_64 & TF_RX_PDU_OUT_V(1)) >> TF_RX_PDU_OUT_S;
3844
3845 c4iw_put_ep(&ep->com); /* from get_ep_from_tid() */
3846 c4iw_put_ep(&ep->com); /* from read_tcb() */
3847
3848 /* If TF_RX_PDU_OUT bit is set, re-read the TCB */
3849 if (rx_pdu_out) {
3850 if (++ep->rx_pdu_out_cnt >= 2) {
3851 WARN_ONCE(1, "tcb re-read() reached the guard limit, finishing the cleanup\n");
3852 goto cleanup;
3853 }
3854 read_tcb(ep);
3855 return 0;
3856 }
3857
3858 ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
3859 TCB_RQ_START_S);
3860cleanup:
3861 pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
3862
3863 if (test_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags))
3864 finish_peer_abort(dev, ep);
3865 else if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags))
3866 send_abort_req(ep);
3867 else
3868 WARN_ONCE(1, "unexpected state!");
3869
3870 return 0;
3871}
3872
1cab775c 3873static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
2f5b48c3
SW
3874{
3875 struct cpl_fw6_msg *rpl = cplhdr(skb);
1cab775c
VP
3876 struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
3877
3878 switch (rpl->type) {
3879 case FW6_TYPE_CQE:
3880 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
3881 break;
3882 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
3883 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
3884 switch (req->t_state) {
3885 case TCP_SYN_SENT:
3886 active_ofld_conn_reply(dev, skb, req);
3887 break;
3888 case TCP_SYN_RECV:
3889 passive_ofld_conn_reply(dev, skb, req);
3890 break;
3891 default:
3892 pr_err("%s unexpected ofld conn wr state %d\n",
3893 __func__, req->t_state);
3894 break;
3895 }
3896 break;
3897 }
3898 return 0;
3899}
3900
3901static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
3902{
963cab50
H
3903 __be32 l2info;
3904 __be16 hdr_len, vlantag, len;
3905 u16 eth_hdr_len;
3906 int tcp_hdr_len, ip_hdr_len;
1cab775c
VP
3907 u8 intf;
3908 struct cpl_rx_pkt *cpl = cplhdr(skb);
3909 struct cpl_pass_accept_req *req;
3910 struct tcp_options_received tmp_opt;
f079af7a 3911 struct c4iw_dev *dev;
963cab50 3912 enum chip_type type;
1cab775c 3913
f079af7a 3914 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
1cab775c 3915 /* Store values from cpl_rx_pkt in temporary location. */
963cab50
H
3916 vlantag = cpl->vlan;
3917 len = cpl->len;
3918 l2info = cpl->l2info;
3919 hdr_len = cpl->hdr_len;
1cab775c
VP
3920 intf = cpl->iff;
3921
3922 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
3923
3924 /*
3925 * We need to parse the TCP options from the SYN packet
3926 * to generate cpl_pass_accept_req.
3927 */
3928 memset(&tmp_opt, 0, sizeof(tmp_opt));
3929 tcp_clear_options(&tmp_opt);
eed29f17 3930 tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL);
1cab775c 3931
d58ff351 3932 req = __skb_push(skb, sizeof(*req));
1cab775c 3933 memset(req, 0, sizeof(*req));
cf7fe64a
HS
3934 req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
3935 SYN_MAC_IDX_V(RX_MACIDX_G(
963cab50 3936 be32_to_cpu(l2info))) |
cf7fe64a 3937 SYN_XACT_MATCH_F);
963cab50
H
3938 type = dev->rdev.lldi.adapter_type;
3939 tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
3940 ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
3941 req->hdr_len =
3942 cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
3943 if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
3944 eth_hdr_len = is_t4(type) ?
3945 RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
3946 RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
3947 req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
3948 IP_HDR_LEN_V(ip_hdr_len) |
3949 ETH_HDR_LEN_V(eth_hdr_len));
3950 } else { /* T6 and later */
3951 eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
3952 req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
3953 T6_IP_HDR_LEN_V(ip_hdr_len) |
3954 T6_ETH_HDR_LEN_V(eth_hdr_len));
3955 }
3956 req->vlan = vlantag;
3957 req->len = len;
6c53e938
HS
3958 req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
3959 PASS_OPEN_TOS_V(tos));
1cab775c
VP
3960 req->tcpopt.mss = htons(tmp_opt.mss_clamp);
3961 if (tmp_opt.wscale_ok)
3962 req->tcpopt.wsf = tmp_opt.snd_wscale;
3963 req->tcpopt.tstamp = tmp_opt.saw_tstamp;
3964 if (tmp_opt.sack_ok)
3965 req->tcpopt.sack = 1;
3966 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
3967 return;
3968}
3969
3970static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
3971 __be32 laddr, __be16 lport,
3972 __be32 raddr, __be16 rport,
3973 u32 rcv_isn, u32 filter, u16 window,
3974 u32 rss_qid, u8 port_id)
3975{
3976 struct sk_buff *req_skb;
3977 struct fw_ofld_connection_wr *req;
3978 struct cpl_pass_accept_req *cpl = cplhdr(skb);
1ce1d471 3979 int ret;
1cab775c
VP
3980
3981 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
9ef63f31
PB
3982 if (!req_skb)
3983 return;
de77b966 3984 req = __skb_put_zero(req_skb, sizeof(*req));
6c53e938 3985 req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
e2ac9628 3986 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
77a80e23 3987 req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
ef5d6355 3988 req->le.filter = (__force __be32) filter;
1cab775c
VP
3989 req->le.lport = lport;
3990 req->le.pport = rport;
3991 req->le.u.ipv4.lip = laddr;
3992 req->le.u.ipv4.pip = raddr;
3993 req->tcb.rcv_nxt = htonl(rcv_isn + 1);
3994 req->tcb.rcv_adv = htons(window);
3995 req->tcb.t_state_to_astid =
77a80e23
HS
3996 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
3997 FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
3998 FW_OFLD_CONNECTION_WR_ASTID_V(
6c53e938 3999 PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
1cab775c
VP
4000
4001 /*
4002 * We store the qid in opt2 which will be used by the firmware
4003 * to send us the wr response.
4004 */
d7990b0c 4005 req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
1cab775c
VP
4006
4007 /*
4008 * We initialize the MSS index in the TCB to 0xF, so that
4009 * when the driver sends cpl_pass_accept_rpl the TCB picks up
4010 * the correct value. If this were 0, TP would ignore any
4011 * value > 0 for the MSS index.
4012 */
d7990b0c 4013 req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
6198dd8d 4014 req->cookie = (uintptr_t)skb;
1cab775c
VP
4015
4016 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
1ce1d471
SW
4017 ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
4018 if (ret < 0) {
4019 pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
4020 ret);
4021 kfree_skb(skb);
4022 kfree_skb(req_skb);
4023 }
1cab775c
VP
4024}
4025
4026/*
4027 * Handler for CPL_RX_PKT message. We need to handle cpl_rx_pkt
4028 * messages when a filter, rather than a server entry, is being
4029 * used to redirect a SYN packet. When a packet hits the filter it
4030 * is redirected to the offload queue, and the driver tries to
4031 * establish the connection using a firmware work request.
4032 */
4033static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
4034{
4035 int stid;
4036 unsigned int filter;
4037 struct ethhdr *eh = NULL;
4038 struct vlan_ethhdr *vlan_eh = NULL;
4039 struct iphdr *iph;
4040 struct tcphdr *tcph;
4041 struct rss_header *rss = (void *)skb->data;
4042 struct cpl_rx_pkt *cpl = (void *)skb->data;
4043 struct cpl_pass_accept_req *req = (void *)(rss + 1);
4044 struct l2t_entry *e;
4045 struct dst_entry *dst;
f86fac79 4046 struct c4iw_ep *lep = NULL;
1cab775c
VP
4047 u16 window;
4048 struct port_info *pi;
4049 struct net_device *pdev;
f079af7a 4050 u16 rss_qid, eth_hdr_len;
1cab775c 4051 int step;
1cab775c
VP
4052 struct neighbour *neigh;
4053
4054 /* Drop all non-SYN packets */
bdc590b9 4055 if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
1cab775c
VP
4056 goto reject;
4057
4058 /*
4059 * Drop all packets which did not hit the filter.
4060 * Unlikely to happen.
4061 */
4062 if (!(rss->filter_hit && rss->filter_tid))
4063 goto reject;
4064
4065 /*
4066 * Calculate the server TID from the filter hit index in cpl_rx_pkt.
4067 */
a4ea025f 4068 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
1cab775c 4069
f86fac79 4070 lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
1cab775c 4071 if (!lep) {
4d45b757
BP
4072 pr_warn("%s connect request on invalid stid %d\n",
4073 __func__, stid);
1cab775c
VP
4074 goto reject;
4075 }
4076
963cab50
H
4077 switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
4078 case CHELSIO_T4:
4079 eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
4080 break;
4081 case CHELSIO_T5:
4082 eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
4083 break;
4084 case CHELSIO_T6:
4085 eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
4086 break;
4087 default:
4088 pr_err("T%d Chip is not supported\n",
4089 CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
4090 goto reject;
4091 }
4092
f079af7a 4093 if (eth_hdr_len == ETH_HLEN) {
1cab775c
VP
4094 eh = (struct ethhdr *)(req + 1);
4095 iph = (struct iphdr *)(eh + 1);
4096 } else {
4097 vlan_eh = (struct vlan_ethhdr *)(req + 1);
4098 iph = (struct iphdr *)(vlan_eh + 1);
35c4a95d 4099 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
1cab775c
VP
4100 }
4101
4102 if (iph->version != 0x4)
4103 goto reject;
4104
4105 tcph = (struct tcphdr *)(iph + 1);
4106 skb_set_network_header(skb, (void *)iph - (void *)rss);
4107 skb_set_transport_header(skb, (void *)tcph - (void *)rss);
4108 skb_get(skb);
4109
548ddb19 4110 pr_debug("lip 0x%x lport %u pip 0x%x pport %u tos %d\n",
a9a42886
JP
4111 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
4112 ntohs(tcph->source), iph->tos);
1cab775c 4113
804c2f3e
VP
4114 dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
4115 iph->daddr, iph->saddr, tcph->dest,
4116 tcph->source, iph->tos);
830662f6 4117 if (!dst) {
548ddb19 4118 pr_err("%s - failed to find dst entry!\n", __func__);
1cab775c
VP
4119 goto reject;
4120 }
1cab775c
VP
4121 neigh = dst_neigh_lookup_skb(dst, skb);
4122
aaa0c23c 4123 if (!neigh) {
548ddb19 4124 pr_err("%s - failed to allocate neigh!\n", __func__);
aaa0c23c
ZZ
4125 goto free_dst;
4126 }
4127
1cab775c
VP
4128 if (neigh->dev->flags & IFF_LOOPBACK) {
4129 pdev = ip_dev_find(&init_net, iph->daddr);
4130 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
4131 pdev, 0);
4132 pi = (struct port_info *)netdev_priv(pdev);
1cab775c
VP
4133 dev_put(pdev);
4134 } else {
830662f6 4135 pdev = get_real_dev(neigh->dev);
1cab775c 4136 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
830662f6
VP
4137 pdev, 0);
4138 pi = (struct port_info *)netdev_priv(pdev);
1cab775c 4139 }
ebf00060 4140 neigh_release(neigh);
1cab775c
VP
4141 if (!e) {
4142 pr_err("%s - failed to allocate l2t entry!\n",
4143 __func__);
4144 goto free_dst;
4145 }
4146
4147 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
4148 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
ef5d6355 4149 window = (__force u16) htons((__force u16)tcph->window);
1cab775c
VP
4150
4151 /* Calculate filter portion for LE region. */
41b4f86c
KS
4152 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
4153 dev->rdev.lldi.ports[0],
4154 e));
1cab775c
VP
4155
4156 /*
4157 * Synthesize the cpl_pass_accept_req. We have everything except the
4158 * TID. Once firmware sends a reply with TID we update the TID field
4159 * in cpl and pass it through the regular cpl_pass_accept_req path.
4160 */
4161 build_cpl_pass_accept_req(skb, stid, iph->tos);
4162 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
4163 tcph->source, ntohl(tcph->seq), filter, window,
4164 rss_qid, pi->port_id);
4165 cxgb4_l2t_release(e);
4166free_dst:
4167 dst_release(dst);
4168reject:
f86fac79
H
4169 if (lep)
4170 c4iw_put_ep(&lep->com);
2f5b48c3
SW
4171 return 0;
4172}
4173
be4c9bad
RD
4174/*
4175 * These are the real handlers that are called from a
4176 * work queue.
4177 */
9dec900c 4178static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
be4c9bad
RD
4179 [CPL_ACT_ESTABLISH] = act_establish,
4180 [CPL_ACT_OPEN_RPL] = act_open_rpl,
4181 [CPL_RX_DATA] = rx_data,
4182 [CPL_ABORT_RPL_RSS] = abort_rpl,
4183 [CPL_ABORT_RPL] = abort_rpl,
4184 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
4185 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
4186 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
4187 [CPL_PASS_ESTABLISH] = pass_establish,
4188 [CPL_PEER_CLOSE] = peer_close,
4189 [CPL_ABORT_REQ_RSS] = peer_abort,
4190 [CPL_CLOSE_CON_RPL] = close_con_rpl,
4191 [CPL_RDMA_TERMINATE] = terminate,
2f5b48c3 4192 [CPL_FW4_ACK] = fw4_ack,
11a27e21 4193 [CPL_GET_TCB_RPL] = read_tcb_rpl,
1cab775c 4194 [CPL_FW6_MSG] = deferred_fw6_msg,
9dec900c 4195 [CPL_RX_PKT] = rx_pkt,
8d1f1a6b
H
4196 [FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
4197 [FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
be4c9bad
RD
4198};
4199
4200static void process_timeout(struct c4iw_ep *ep)
4201{
4202 struct c4iw_qp_attributes attrs;
4203 int abort = 1;
4204
2f5b48c3 4205 mutex_lock(&ep->com.mutex);
548ddb19 4206 pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state);
793dad94 4207 set_bit(TIMEDOUT, &ep->com.history);
be4c9bad
RD
4208 switch (ep->com.state) {
4209 case MPA_REQ_SENT:
be4c9bad
RD
4210 connect_reply_upcall(ep, -ETIMEDOUT);
4211 break;
4212 case MPA_REQ_WAIT:
ceb110a8 4213 case MPA_REQ_RCVD:
e4b76a2a 4214 case MPA_REP_SENT:
ceb110a8 4215 case FPDU_MODE:
be4c9bad
RD
4216 break;
4217 case CLOSING:
4218 case MORIBUND:
4219 if (ep->com.cm_id && ep->com.qp) {
4220 attrs.next_state = C4IW_QP_STATE_ERROR;
4221 c4iw_modify_qp(ep->com.qp->rhp,
4222 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
4223 &attrs, 1);
4224 }
be13b2df 4225 close_complete_upcall(ep, -ETIMEDOUT);
be4c9bad 4226 break;
b33bd0cb
SW
4227 case ABORTING:
4228 case DEAD:
4229
4230 /*
4231 * These states are expected if the ep timed out at the same
4232 * time as another thread was calling stop_ep_timer().
4233 * So we silently do nothing for these states.
4234 */
4235 abort = 0;
4236 break;
be4c9bad 4237 default:
76f267b7 4238 WARN(1, "%s unexpected state ep %p tid %u state %u\n",
be4c9bad 4239 __func__, ep, ep->hwtid, ep->com.state);
be4c9bad
RD
4240 abort = 0;
4241 }
cc18b939 4242 mutex_unlock(&ep->com.mutex);
69736279
H
4243 if (abort)
4244 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
be4c9bad
RD
4245 c4iw_put_ep(&ep->com);
4246}
4247
4248static void process_timedout_eps(void)
4249{
4250 struct c4iw_ep *ep;
4251
4252 spin_lock_irq(&timeout_lock);
4253 while (!list_empty(&timeout_list)) {
4254 struct list_head *tmp;
4255
4256 tmp = timeout_list.next;
4257 list_del(tmp);
b33bd0cb
SW
4258 tmp->next = NULL;
4259 tmp->prev = NULL;
be4c9bad
RD
4260 spin_unlock_irq(&timeout_lock);
4261 ep = list_entry(tmp, struct c4iw_ep, entry);
4262 process_timeout(ep);
4263 spin_lock_irq(&timeout_lock);
4264 }
4265 spin_unlock_irq(&timeout_lock);
4266}
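/*
 * The lock/unlock dance above (pop one entry under the lock, drop the lock
 * for the work, retake it) is the standard way to drain a shared list when
 * the per-entry work may sleep. A minimal user-space sketch of the same
 * pattern, assuming a trivial node type:
 */
#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;

static void handle(struct node *n)	/* stand-in for per-entry work */
{
	(void)n;			/* real code would block here */
}

static void drain_pending(void)
{
	pthread_mutex_lock(&list_lock);
	while (pending) {
		struct node *n = pending;

		pending = n->next;	/* unlink while still holding the lock */
		n->next = NULL;		/* mark off-list, as the driver does */
		pthread_mutex_unlock(&list_lock);
		handle(n);		/* blocking work with the lock dropped */
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
}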
4267
4268static void process_work(struct work_struct *work)
4269{
4270 struct sk_buff *skb = NULL;
4271 struct c4iw_dev *dev;
c1d7356c 4272 struct cpl_act_establish *rpl;
be4c9bad
RD
4273 unsigned int opcode;
4274 int ret;
4275
b33bd0cb 4276 process_timedout_eps();
be4c9bad
RD
4277 while ((skb = skb_dequeue(&rxq))) {
4278 rpl = cplhdr(skb);
4279 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
4280 opcode = rpl->ot.opcode;
4281
ccc04cdd
DC
4282 if (opcode >= ARRAY_SIZE(work_handlers) ||
4283 !work_handlers[opcode]) {
4284 pr_err("No handler for opcode 0x%x.\n", opcode);
be4c9bad 4285 kfree_skb(skb);
ccc04cdd
DC
4286 } else {
4287 ret = work_handlers[opcode](dev, skb);
4288 if (!ret)
4289 kfree_skb(skb);
4290 }
b33bd0cb 4291 process_timedout_eps();
be4c9bad 4292 }
be4c9bad
RD
4293}
4294
4295static DECLARE_WORK(skb_work, process_work);
4296
a9346abe 4297static void ep_timeout(struct timer_list *t)
be4c9bad 4298{
a9346abe 4299 struct c4iw_ep *ep = from_timer(ep, t, timer);
1ec779cc 4300 int kickit = 0;
be4c9bad
RD
4301
4302 spin_lock(&timeout_lock);
1ec779cc 4303 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
b33bd0cb
SW
4304 /*
4305 * Only insert if it is not already on the list.
4306 */
4307 if (!ep->entry.next) {
4308 list_add_tail(&ep->entry, &timeout_list);
4309 kickit = 1;
4310 }
1ec779cc 4311 }
be4c9bad 4312 spin_unlock(&timeout_lock);
1ec779cc
VP
4313 if (kickit)
4314 queue_work(workq, &skb_work);
be4c9bad
RD
4315}
4316
cfdda9d7
SW
4317/*
4318 * All the CM events are handled on a work queue to have a safe context.
4319 */
4320static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
4321{
4322
4323 /*
4324 * Save dev in the skb->cb area.
4325 */
4326 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
4327
4328 /*
4329 * Queue the skb and schedule the worker thread.
4330 */
4331 skb_queue_tail(&rxq, skb);
4332 queue_work(workq, &skb_work);
4333 return 0;
4334}
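/*
 * Note: skb->cb is a 48-byte scratch area owned by whichever layer
 * currently holds the skb. sched() parks the c4iw_dev pointer at offset
 * sizeof(void *) in that area, and process_work() reads it back from the
 * same offset, so no per-message allocation or lookup is needed.
 */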
4335
4336static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
4337{
4338 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
4339
4340 if (rpl->status != CPL_ERR_NONE) {
700456bd
JP
4341 pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
4342 rpl->status, GET_TID(rpl));
cfdda9d7 4343 }
2f5b48c3 4344 kfree_skb(skb);
cfdda9d7
SW
4345 return 0;
4346}
4347
be4c9bad
RD
4348static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
4349{
4350 struct cpl_fw6_msg *rpl = cplhdr(skb);
4351 struct c4iw_wr_wait *wr_waitp;
4352 int ret;
4353
548ddb19 4354 pr_debug("type %u\n", rpl->type);
be4c9bad
RD
4355
4356 switch (rpl->type) {
5be78ee9 4357 case FW6_TYPE_WR_RPL:
be4c9bad 4358 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
c8e081a1 4359 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
548ddb19 4360 pr_debug("wr_waitp %p ret %u\n", wr_waitp, ret);
d9594d99 4361 if (wr_waitp)
2015f26c 4362 c4iw_wake_up_deref(wr_waitp, ret ? -ret : 0);
2f5b48c3 4363 kfree_skb(skb);
be4c9bad 4364 break;
5be78ee9 4365 case FW6_TYPE_CQE:
5be78ee9 4366 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
1cab775c 4367 sched(dev, skb);
5be78ee9 4368 break;
be4c9bad 4369 default:
700456bd
JP
4370 pr_err("%s unexpected fw6 msg type %u\n",
4371 __func__, rpl->type);
2f5b48c3 4372 kfree_skb(skb);
be4c9bad
RD
4373 break;
4374 }
4375 return 0;
4376}
4377
8da7e7a5
SW
4378static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
4379{
4380 struct cpl_abort_req_rss *req = cplhdr(skb);
4381 struct c4iw_ep *ep;
8da7e7a5
SW
4382 unsigned int tid = GET_TID(req);
4383
944661dd
H
4384 ep = get_ep_from_tid(dev, tid);
4385 /* This EP will be dereferenced in peer_abort() */
14b92228 4386 if (!ep) {
700456bd 4387 pr_warn("Abort on non-existent endpoint, tid %d\n", tid);
14b92228
SW
4388 kfree_skb(skb);
4389 return 0;
4390 }
b65eef0a 4391 if (cxgb_is_neg_adv(req->status)) {
f48fca4d
BP
4392 pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
4393 ep->hwtid, req->status,
a9a42886 4394 neg_adv_str(req->status));
944661dd 4395 goto out;
8da7e7a5 4396 }
548ddb19 4397 pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state);
8da7e7a5 4398
2015f26c 4399 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
944661dd 4400out:
8da7e7a5
SW
4401 sched(dev, skb);
4402 return 0;
4403}
4404
be4c9bad
RD
4405/*
4406 * Most upcalls from the T4 Core go to sched() to
4407 * schedule the processing on a work queue.
4408 */
4409c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
4410 [CPL_ACT_ESTABLISH] = sched,
4411 [CPL_ACT_OPEN_RPL] = sched,
4412 [CPL_RX_DATA] = sched,
4413 [CPL_ABORT_RPL_RSS] = sched,
4414 [CPL_ABORT_RPL] = sched,
4415 [CPL_PASS_OPEN_RPL] = sched,
4416 [CPL_CLOSE_LISTSRV_RPL] = sched,
4417 [CPL_PASS_ACCEPT_REQ] = sched,
4418 [CPL_PASS_ESTABLISH] = sched,
4419 [CPL_PEER_CLOSE] = sched,
4420 [CPL_CLOSE_CON_RPL] = sched,
8da7e7a5 4421 [CPL_ABORT_REQ_RSS] = peer_abort_intr,
be4c9bad
RD
4422 [CPL_RDMA_TERMINATE] = sched,
4423 [CPL_FW4_ACK] = sched,
4424 [CPL_SET_TCB_RPL] = set_tcb_rpl,
11a27e21 4425 [CPL_GET_TCB_RPL] = sched,
1cab775c
VP
4426 [CPL_FW6_MSG] = fw6_msg,
4427 [CPL_RX_PKT] = sched
be4c9bad
RD
4428};
4429
cfdda9d7
SW
4430int __init c4iw_cm_init(void)
4431{
be4c9bad 4432 spin_lock_init(&timeout_lock);
cfdda9d7
SW
4433 skb_queue_head_init(&rxq);
4434
52ee1a05 4435 workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
cfdda9d7
SW
4436 if (!workq)
4437 return -ENOMEM;
4438
cfdda9d7
SW
4439 return 0;
4440}
4441
46c1376d 4442void c4iw_cm_term(void)
cfdda9d7 4443{
be4c9bad 4444 WARN_ON(!list_empty(&timeout_list));
cfdda9d7
SW
4445 flush_workqueue(workq);
4446 destroy_workqueue(workq);
4447}