/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include <libcxgb_cm.h>
#include "iw_cxgb4.h"
#include "clip_tbl.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "obsolete");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}
static void start_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}
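
/*
 * Sketch of the intended pairing (illustrative only, not driver code):
 * every start_ep_timer() takes an ep reference that is dropped exactly
 * once, with the TIMEOUT bit arbitrating between a stop and a concurrent
 * expiry:
 *
 *	start_ep_timer(ep);		// c4iw_get_ep() + TIMEOUT cleared
 *	...
 *	if (!stop_ep_timer(ep)) {
 *		// stopped before it fired; the timer ref was dropped here
 *	} else {
 *		// TIMEOUT was already set; the expiry path owns the ref
 *	}
 */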
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_debug("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	else if (error == NET_XMIT_DROP)
		return -ENOMEM;
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_debug("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}
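
/*
 * Both senders above normalize the cxgb4 return value: nonnegative netdev
 * congestion codes map to 0, NET_XMIT_DROP maps to -ENOMEM in the l2t
 * path, and negative errnos pass through after the skb has been freed.
 * A caller sketch (illustrative only):
 *
 *	ret = c4iw_ofld_send(&ep->com.dev->rdev, skb);
 *	if (ret)
 *		goto unwind;	// skb already freed; just unwind ep state
 */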
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);

	skb = get_skb(skb, len, GFP_KERNEL);
	if (!skb)
		return;

	cxgb_mk_tid_release(skb, len, hwtid, 0);
	c4iw_ofld_send(rdev, skb);
}
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			 TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
		 ep->mss, ep->emss);
}
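
/*
 * Worked example of the emss arithmetic above (assumed values, for
 * illustration only; not compiled into the driver):
 *
 *	int mtu = 1500;			// mtus[TCPOPT_MSS_G(opt)]
 *	int emss = mtu - 20 - 20;	// IPv4 + TCP headers -> 1460
 *	if (tstamps_negotiated)
 *		emss -= 12;		// round_up(TCPOLEN_TIMESTAMP, 4)
 *	if (emss < 128)
 *		emss = 128;		// floor enforced by set_emss()
 */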
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	pr_debug("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}
static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
{
	struct sk_buff *skb;
	unsigned int i;
	size_t len;

	len = roundup(sizeof(union cpl_wr_size), 16);
	for (i = 0; i < size; i++) {
		skb = alloc_skb(len, GFP_KERNEL);
		if (!skb)
			goto fail;
		skb_queue_tail(ep_skb_list, skb);
	}
	return 0;
fail:
	skb_queue_purge(ep_skb_list);
	return -ENOMEM;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	pr_debug("%s alloc ep %p\n", __func__, epc);
	return epc;
}
static void remove_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
	if (idr_is_empty(&ep->com.dev->hwtid_idr))
		wake_up(&ep->com.dev->wait);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

static void insert_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}
/*
 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
 */
static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
{
	struct c4iw_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->hwtid_idr, tid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

/*
 * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
 */
static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
					       unsigned int stid)
{
	struct c4iw_listen_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->stid_idr, stid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}
void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	pr_debug("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
				 ep->com.local_addr.ss_family);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		if (ep->mpa_skb)
			kfree_skb(ep->mpa_skb);
	}
	if (!skb_queue_empty(&ep->com.ep_skb_list))
		skb_queue_purge(&ep->com.ep_skb_list);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);

	/*
	 * If we have a hwtid, then remove it from the idr table
	 * so lookups will no longer find this endpoint.  Otherwise
	 * we have a race where one thread finds the ep ptr just
	 * before the other thread is freeing the ep memory.
	 */
	if (ep->hwtid != -1)
		remove_ep_tid(ep);
	c4iw_put_ep(&ep->com);
}
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure\n");
	kfree_skb(skb);
}

static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
}

enum {
	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
};
static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	release_ep_resources(ep);
	kfree_skb(skb);
	return 0;
}

static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	c4iw_put_ep(&ep->parent_ep->com);
	release_ep_resources(ep);
	kfree_skb(skb);
	return 0;
}

/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources.  This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
				  int cpl)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	/* Set our special ARP_FAILURE opcode */
	rpl->ot.opcode = cpl;

	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
	 * ptr.
	 */
	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
	sched(ep->com.dev, skb);
}
/* Handle an ARP failure for an accept */
static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during accept - tid %u - dropping connection\n",
	       ep->hwtid);

	__state_set(&ep->com, DEAD);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during connect\n");
	connect_reply_upcall(ep, -EHOSTUNREACH);
	__state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}
/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	int ret;
	struct c4iw_ep *ep = handle;
	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	skb_get(skb);
	ret = c4iw_ofld_send(rdev, skb);
	if (ret) {
		__state_set(&ep->com, DEAD);
		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
	} else
		kfree_skb(skb);
}
static int send_flowc(struct c4iw_ep *ep)
{
	struct fw_flowc_wr *flowc;
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	int i;
	u16 vlan = ep->l2t->vlan;
	int nparams;

	if (WARN_ON(!skb))
		return -ENOMEM;

	if (vlan == CPL_L2T_VLAN_NONE)
		nparams = 8;
	else
		nparams = 9;

	flowc = __skb_put(skb, FLOWC_LEN);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
					  16)) | FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	if (nparams == 9) {
		u16 pri;

		pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
		flowc->mnemval[8].val = cpu_to_be32(pri);
	} else {
		/* Pad WR to 16 byte boundary */
		flowc->mnemval[8].mnemonic = 0;
		flowc->mnemval[8].val = 0;
	}
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
static int send_halfclose(struct c4iw_ep *ep)
{
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (WARN_ON(!skb))
		return -ENOMEM;

	cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
			      NULL, arp_failure_discard);

	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep)
{
	u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (WARN_ON(!req_skb))
		return -ENOMEM;

	cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
			  ep, abort_arp_failure);

	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req = NULL;
	struct cpl_t5_act_open_req *t5req = NULL;
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req6 *req6 = NULL;
	struct cpl_t5_act_open_req6 *t5req6 = NULL;
	struct cpl_t6_act_open_req6 *t6req6 = NULL;
	struct sk_buff *skb = NULL;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	u32 wscale;
	int win, sizev4, sizev6, wrlen;
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.remote_addr;
	int ret;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
	u32 isn = (prandom_u32() & ~7UL) - 1;
	struct net_device *netdev;
	u64 params;

	netdev = ep->com.dev->rdev.lldi.ports[0];

	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
	case CHELSIO_T4:
		sizev4 = sizeof(struct cpl_act_open_req);
		sizev6 = sizeof(struct cpl_act_open_req6);
		break;
	case CHELSIO_T5:
		sizev4 = sizeof(struct cpl_t5_act_open_req);
		sizev6 = sizeof(struct cpl_t5_act_open_req6);
		break;
	case CHELSIO_T6:
		sizev4 = sizeof(struct cpl_t6_act_open_req);
		sizev6 = sizeof(struct cpl_t6_act_open_req6);
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(adapter_type));
		return -EINVAL;
	}

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	pr_debug("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		      enable_tcp_timestamps,
		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
	}

	params = cxgb4_select_ntuple(netdev, ep->l2t);

	if (ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&la6->sin6_addr.s6_addr, 1);

	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (ep->com.remote_addr.ss_family == AF_INET) {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req = skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			break;
		case CHELSIO_T5:
			t5req = skb_put(skb, wrlen);
			INIT_TP_WR(t5req, 0);
			req = (struct cpl_act_open_req *)t5req;
			break;
		case CHELSIO_T6:
			t6req = skb_put(skb, wrlen);
			INIT_TP_WR(t6req, 0);
			req = (struct cpl_act_open_req *)t6req;
			t5req = (struct cpl_t5_act_open_req *)t6req;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
		req->local_port = la->sin_port;
		req->peer_port = ra->sin_port;
		req->local_ip = la->sin_addr.s_addr;
		req->peer_ip = ra->sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req->params = cpu_to_be32(params);
			req->opt2 = cpu_to_be32(opt2);
		} else {
			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
				t5req->params =
					  cpu_to_be64(FILTER_TUPLE_V(params));
				t5req->rsvd = cpu_to_be32(isn);
				pr_debug("%s snd_isn %u\n", __func__,
					 t5req->rsvd);
				t5req->opt2 = cpu_to_be32(opt2);
			} else {
				t6req->params =
					  cpu_to_be64(FILTER_TUPLE_V(params));
				t6req->rsvd = cpu_to_be32(isn);
				pr_debug("%s snd_isn %u\n", __func__,
					 t6req->rsvd);
				t6req->opt2 = cpu_to_be32(opt2);
			}
		}
	} else {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req6 = skb_put(skb, wrlen);
			INIT_TP_WR(req6, 0);
			break;
		case CHELSIO_T5:
			t5req6 = skb_put(skb, wrlen);
			INIT_TP_WR(t5req6, 0);
			req6 = (struct cpl_act_open_req6 *)t5req6;
			break;
		case CHELSIO_T6:
			t6req6 = skb_put(skb, wrlen);
			INIT_TP_WR(t6req6, 0);
			req6 = (struct cpl_act_open_req6 *)t6req6;
			t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid << 14) | ep->atid)));
		req6->local_port = la6->sin6_port;
		req6->peer_port = ra6->sin6_port;
		req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
		req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
		req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
		req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
		req6->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req6->params = cpu_to_be32(cxgb4_select_ntuple(netdev,
								     ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		} else {
			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
				t5req6->params =
					    cpu_to_be64(FILTER_TUPLE_V(params));
				t5req6->rsvd = cpu_to_be32(isn);
				pr_debug("%s snd_isn %u\n", __func__,
					 t5req6->rsvd);
				t5req6->opt2 = cpu_to_be32(opt2);
			} else {
				t6req6->params =
					    cpu_to_be64(FILTER_TUPLE_V(params));
				t6req6->rsvd = cpu_to_be32(isn);
				pr_debug("%s snd_isn %u\n", __func__,
					 t6req6->rsvd);
				t6req6->opt2 = cpu_to_be32(opt2);
			}
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
clip_release:
	if (ret && ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&la6->sin6_addr.s6_addr, 1);
	return ret;
}
static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			u8 mpa_rev_to_use)
{
	int mpalen, wrlen, ret;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("%s ep %p tid %u pd_len %d\n",
		 __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = skb_put_zero(skb, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));

	mpa->flags = 0;
	if (crc_enabled)
		mpa->flags |= MPA_CRC;
	if (markers_enabled) {
		mpa->flags |= MPA_MARKERS;
		ep->mpa_attr.recv_marker_enabled = 1;
	} else {
		ep->mpa_attr.recv_marker_enabled = 0;
	}
	if (mpa_rev_to_use == 2)
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;

	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					  sizeof (struct mpa_v2_conn_params));
		pr_debug("%s initiator ird %u ord %u\n", __func__, ep->ird,
			 ep->ord);
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	if (ret)
		return ret;
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return ret;
}
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("%s ep %p tid %u pd_len %d\n",
		 __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = skb_put_zero(skb, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					  sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("%s ep %p tid %u pd_len %d\n",
		 __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = skb_put_zero(skb, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = 0;
	if (ep->mpa_attr.crc_enabled)
		mpa->flags |= MPA_CRC;
	if (ep->mpa_attr.recv_marker_enabled)
		mpa->flags |= MPA_MARKERS;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					  sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
					FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;
	int ret;

	ep = lookup_atid(t, atid);

	pr_debug("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
		 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family);
	insert_ep_tid(ep);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	ret = send_flowc(ep);
	if (ret)
		goto err;
	if (ep->retry_with_mpa_v1)
		ret = send_mpa_req(ep, skb, 1);
	else
		ret = send_mpa_req(ep, skb, mpa_rev);
	if (ret)
		goto err;
	mutex_unlock(&ep->com.mutex);
	return 0;
err:
	mutex_unlock(&ep->com.mutex);
	connect_reply_upcall(ep, -ENOMEM);
	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}
static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		pr_debug("close complete delivered ep %p cm_id %p tid %u\n",
			 ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		pr_debug("peer close delivered ep %p cm_id %p tid %u\n",
			 ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep,
			 ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	pr_debug("%s ep %p tid %u status %d\n",
		 __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.ord = cur_max_read_depth(ep->com.dev);
			event.ird = cur_max_read_depth(ep->com.dev);
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	pr_debug("%s ep %p tid %u status %d\n", __func__, ep,
		 ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0)
		deref_cm_id(&ep->com);
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = cur_max_read_depth(ep->com.dev);
		event.ird = cur_max_read_depth(ep->com.dev);
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ord;
	event.ord = ep->ird;
	if (ep->com.cm_id) {
		pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct sk_buff *skb;
	u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
	u32 credit_dack;

	pr_debug("%s ep %p tid %u credits %u\n",
		 __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;

	credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F |
		      RX_DACK_MODE_V(dack_mode);

	cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
			    credit_dack);

	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
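
/*
 * Credit overage example (assumed numbers, illustration only): with
 * rcv_win = 256KB but RCV_BUFSIZ_M allowing only, say, 64 1KB units, opt0
 * could advertise just 64KB at setup, so the first ack returns
 * credits += 256KB - 64KB to open the window fully:
 *
 *	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
 *		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
 */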
#define RELAXED_IRD_NEGOTIATION 1

/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err_stop_timer;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
		       __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err_stop_timer;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err_stop_timer;
	}

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			pr_debug("%s responder ird %u ord %u ep ird %u ord %u\n",
				 __func__,
				 resp_ird, resp_ord, ep->ird, ep->ord);

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.lldi.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
		 __func__, ep->mpa_attr.crc_enabled,
		 ep->mpa_attr.recv_marker_enabled,
		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
		 ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		pr_err("%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		pr_err("%s: Insufficient IRD, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err_stop_timer:
	stop_ep_timer(ep);
err:
	disconnect = 2;
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}
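
/*
 * How the tri-state return is consumed (sketch; rx_data() below is the
 * real caller): 0 leaves the connection alone, 1 asks for an orderly
 * close, 2 asks for an abort:
 *
 *	disconnect = process_mpa_reply(ep, skb);
 *	...
 *	if (disconnect)
 *		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
 */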
/*
 * process_mpa_request - process streaming mode MPA request
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
		goto err_stop_timer;

	pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;

	pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
		       __func__, mpa_rev, mpa->revision);
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
		goto err_stop_timer;

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA)
		goto err_stop_timer;

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
		goto err_stop_timer;
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ird = min_t(u32, ep->ird,
					cur_max_read_depth(ep->com.dev));
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = min_t(u32, ep->ord,
					cur_max_read_depth(ep->com.dev));
			pr_debug("%s initiator ird %u ord %u\n",
				 __func__, ep->ird, ep->ord);
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
		 __func__,
		 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
		 ep->mpa_attr.p2p_type);

	__state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
	if (ep->parent_ep->com.state != DEAD) {
		if (connect_request_upcall(ep))
			goto err_unlock_parent;
	} else {
		goto err_unlock_parent;
	}
	mutex_unlock(&ep->parent_ep->com.mutex);
	return 0;

err_unlock_parent:
	mutex_unlock(&ep->parent_ep->com.mutex);
	goto err_out;
err_stop_timer:
	(void)stop_ep_timer(ep);
err_out:
	return 2;
}
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	__u8 status = hdr->status;
	int disconnect = 0;

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;
	pr_debug("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	mutex_lock(&ep->com.mutex);

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		update_rx_credits(ep, dlen);
		ep->rcv_seq += dlen;
		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		update_rx_credits(ep, dlen);
		ep->rcv_seq += dlen;
		disconnect = process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;

		update_rx_credits(ep, dlen);
		BUG_ON(!ep->com.qp);
		if (status)
			pr_err("%s Unexpected streaming data." \
			       " qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);

	ep = get_ep_from_tid(dev, tid);
	if (!ep) {
		pr_warn("Abort rpl to freed endpoint\n");
		return 0;
	}
	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
	return 0;
}
static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	u32 wscale;
	struct sockaddr_in *sin;
	int win;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = __skb_put_zero(skb, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
				     ep->l2t));
	sin = (struct sockaddr_in *)&ep->com.local_addr;
	req->le.lport = sin->sin_port;
	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
	sin = (struct sockaddr_in *)&ep->com.remote_addr;
	req->le.pport = sin->sin_port;
	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
	req->tcb.t_state_to_astid =
			htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
			FW_OFLD_CONNECTION_WR_ASTID_V(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
			htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		      enable_tcp_timestamps,
		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
		(nocong ? NO_CONG_F : 0) |
		KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(ep->l2t->idx) |
		TX_CHAN_V(ep->tx_chan) |
		SMAC_SEL_V(ep->smac_idx) |
		DSCP_V(ep->tos >> 2) |
		ULP_MODE_V(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ_V(win));
	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
		TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL_V(0) |
		CCTRL_ECN_V(enable_ecn) |
		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
/*
 * Some of the error codes above implicitly indicate that there is no TID
 * allocated with the result of an ACT_OPEN.  We use this predicate to make
 * that explicit.
 */
static inline int act_open_has_tid(int status)
{
	return (status != CPL_ERR_TCAM_PARITY &&
		status != CPL_ERR_TCAM_MISS &&
		status != CPL_ERR_TCAM_FULL &&
		status != CPL_ERR_CONN_EXIST_SYNRECV &&
		status != CPL_ERR_CONN_EXIST);
}

static char *neg_adv_str(unsigned int status)
{
	switch (status) {
	case CPL_ERR_RTX_NEG_ADVICE:
		return "Retransmit timeout";
	case CPL_ERR_PERSIST_NEG_ADVICE:
		return "Persist timeout";
	case CPL_ERR_KEEPALV_NEG_ADVICE:
		return "Keepalive timeout";
	default:
		return "Unknown";
	}
}

static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
	ep->snd_win = snd_win;
	ep->rcv_win = rcv_win;
	pr_debug("%s snd_win %d rcv_win %d\n",
		 __func__, ep->snd_win, ep->rcv_win);
}

#define ACT_OPEN_RETRY_COUNT 2
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
{
	struct neighbour *n;
	int err, step;
	struct net_device *pdev;

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  pdev, 1))
					break;
			}
		else
			pdev = NULL;

		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
		if (!ep->l2t) {
			dev_put(pdev);
			goto out;
		}
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
						cxgb4_port_viid(pdev));
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
		dev_put(pdev);
	} else {
		pdev = get_real_dev(n->dev);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
						cxgb4_port_viid(pdev));
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
	}
	if (clear_mpa_v1) {
		ep->retry_with_mpa_v1 = 0;
		ep->tried_with_mpa_v1 = 0;
	}

	err = 0;
out:
	rcu_read_unlock();
	neigh_release(n);

	return err;
}
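
/*
 * Queue selection arithmetic in import_ep(), worked through (assumed lldi
 * values, illustration only): with ntxq = 16 tx queues over nchan = 4
 * channels, step = 16 / 4 = 4, so port index 1 gets txq_idx = 1 * 4 = 4;
 * the rss qid is picked the same way from rxq_ids using the nrxq-based
 * step.
 */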
static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	int size = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_remote_addr;
	int iptype;
	__u8 *ra;

	pr_debug("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);
	c4iw_init_wr_wait(&ep->com.wr_wait);

	/* When MPA revision is different on nodes, the node with MPA_rev=2
	 * tries to reconnect with MPA_rev 1 for the same EP through
	 * c4iw_reconnect(), where the same EP is assigned with new tid for
	 * further connection establishment. As we are using the same EP pointer
	 * for reconnect, few skbs are used during the previous c4iw_connect(),
	 * which leaves the EP with inadequate skbs for further
	 * c4iw_reconnect(), Further causing an assert BUG_ON() due to empty
	 * skb_list() during peer_abort(). Allocate skbs which is already used.
	 */
	size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
	if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
		err = -ENOMEM;
		goto fail1;
	}

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);

	/* find a route */
	if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
		ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
					  laddr->sin_addr.s_addr,
					  raddr->sin_addr.s_addr,
					  laddr->sin_port,
					  raddr->sin_port, ep->com.cm_id->tos);
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;
	} else {
		ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
					   get_real_dev,
					   laddr6->sin6_addr.s6_addr,
					   raddr6->sin6_addr.s6_addr,
					   laddr6->sin6_port,
					   raddr6->sin6_port, 0,
					   raddr6->sin6_scope_id);
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;
	}
	if (!ep->dst) {
		pr_err("%s - cannot find route\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
			ep->com.dev->rdev.lldi.adapter_type,
			ep->com.cm_id->tos);
	if (err) {
		pr_err("%s - cannot alloc l2e\n", __func__);
		goto fail4;
	}

	pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
		 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
		 ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = ep->com.cm_id->tos;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * remember to send notification to upper layer.
	 * We are in here so the upper layer is not aware that this is
	 * re-connect attempt and so, upper layer is still waiting for
	 * response of 1st connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
fail1:
	c4iw_put_ep(&ep->com);
out:
	return err;
}
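/*
 * Handle the hardware reply to an active open.  Negative advice is
 * counted and ignored; TCAM-full and connection-exists failures are
 * retried (via a firmware offload WR or c4iw_reconnect()); anything
 * else fails the connect upcall and tears the endpoint down.
 */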
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = TID_TID_G(AOPEN_ATID_G(
				      ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
	struct sockaddr_in *la;
	struct sockaddr_in *ra;
	struct sockaddr_in6 *la6;
	struct sockaddr_in6 *ra6;
	int ret = 0;

	ep = lookup_atid(t, atid);
	la = (struct sockaddr_in *)&ep->com.local_addr;
	ra = (struct sockaddr_in *)&ep->com.remote_addr;
	la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;

	pr_debug("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
		 status, status2errno(status));

	if (cxgb_is_neg_adv(status)) {
		pr_debug("%s Connection problems for atid %u status %u (%s)\n",
			 __func__, atid, status, neg_adv_str(status));
		ep->stats.connect_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		return 0;
	}

	set_bit(ACT_OPEN_RPL, &ep->com.history);

	/*
	 * Log interesting failures.
	 */
	switch (status) {
	case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
	case CPL_ERR_TCAM_FULL:
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.tcam_full++;
		mutex_unlock(&dev->rdev.stats.lock);
		if (ep->com.local_addr.ss_family == AF_INET &&
		    dev->rdev.lldi.enable_fw_ofld_conn) {
			ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
						   ntohl(rpl->atid_status))));
			if (ret)
				goto fail;
			return 0;
		}
		break;
	case CPL_ERR_CONN_EXIST:
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			set_bit(ACT_RETRY_INUSE, &ep->com.history);
			if (ep->com.remote_addr.ss_family == AF_INET6) {
				struct sockaddr_in6 *sin6 =
						(struct sockaddr_in6 *)
						&ep->com.local_addr;
				cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)
					&sin6->sin6_addr.s6_addr, 1);
			}
			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
				      atid);
			cxgb4_free_atid(t, atid);
			dst_release(ep->dst);
			cxgb4_l2t_release(ep->l2t);
			c4iw_reconnect(ep);
			return 0;
		}
		break;
	default:
		if (ep->com.local_addr.ss_family == AF_INET) {
			pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
				atid, status, status2errno(status),
				&la->sin_addr.s_addr, ntohs(la->sin_port),
				&ra->sin_addr.s_addr, ntohs(ra->sin_port));
		} else {
			pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
				atid, status, status2errno(status),
				la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
				ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
		}
		break;
	}

fail:
	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl),
				 ep->com.local_addr.ss_family);

	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);

	if (!ep) {
		pr_debug("%s stid %d lookup failure!\n", __func__, stid);
		goto out;
	}
	pr_debug("%s ep %p status %d error %d\n", __func__, ep,
		 rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	c4iw_put_ep(&ep->com);
out:
	return 0;
}
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);

	pr_debug("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	c4iw_put_ep(&ep->com);
	return 0;
}
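/*
 * Build and send the CPL_PASS_ACCEPT_RPL that commits the hardware to
 * the passive connection: MSS index, window scale, congestion and ECN
 * options are folded into opt0/opt2, and T5+ chips additionally get a
 * driver-chosen initial send sequence number.
 */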
static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
		     struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	u32 wscale;
	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
	int win;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));

	skb_get(skb);
	rpl = cplhdr(skb);
	if (!is_t4(adapter_type)) {
		skb_trim(skb, roundup(sizeof(*rpl5), 16));
		rpl5 = (void *)rpl;
		INIT_TP_WR(rpl5, ep->hwtid);
	} else {
		skb_trim(skb, sizeof(*rpl));
		INIT_TP_WR(rpl, ep->hwtid);
	}
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));

	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		      enable_tcp_timestamps && req->tcpopt.tstamp,
		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
			tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
				IP_HDR_LEN_G(hlen);
		else
			tcph = (const void *)(req + 1) +
				T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN_V(1);
	}
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		u32 isn = (prandom_u32() & ~7UL) - 1;
		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		rpl5 = (void *)rpl;
		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
		if (peer2peer)
			isn += 4;
		rpl5->iss = cpu_to_be32(isn);
		pr_debug("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
	}

	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);

	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
	pr_debug("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	release_tid(&dev->rdev, hwtid, skb);
}
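/*
 * Handle an incoming SYN for one of our listening endpoints: validate
 * the listener, extract the 4-tuple, find a route, and create a child
 * endpoint that accept_cr() answers on.
 */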
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep = NULL, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	struct sockaddr_in6 *sin6;
	int err;
	u16 peer_mss = ntohs(req->tcpopt.mss);
	int iptype;
	unsigned short hdrs;
	u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));

	parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
	if (!parent_ep) {
		pr_debug("%s connect request on invalid stid %d\n",
			 __func__, stid);
		goto reject;
	}

	if (state_read(&parent_ep->com) != LISTEN) {
		pr_debug("%s - listening ep not in LISTEN\n", __func__);
		goto reject;
	}

	cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
			&iptype, local_ip, peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4) {
		pr_debug("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
			 , __func__, parent_ep, hwtid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
				      *(__be32 *)local_ip, *(__be32 *)peer_ip,
				      local_port, peer_port, tos);
	} else {
		pr_debug("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
			 , __func__, parent_ep, hwtid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
				       local_ip, peer_ip, local_port, peer_port,
				       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
				       ((struct sockaddr_in6 *)
					&parent_ep->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n", __func__);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		pr_err("%s - failed to allocate ep entry!\n", __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
			parent_ep->com.dev->rdev.lldi.adapter_type, tos);
	if (err) {
		pr_err("%s - failed to allocate l2t entry!\n", __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

	hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
	       sizeof(struct tcphdr) +
	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
		child_ep->mtu = peer_mss + hdrs;

	skb_queue_head_init(&child_ep->com.ep_skb_list);
	if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
		goto fail;

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;

	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
			&child_ep->com.local_addr;

		sin->sin_family = AF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.local_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = ((struct sockaddr_in *)
				 &parent_ep->com.local_addr)->sin_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = ((struct sockaddr_in6 *)
				   &parent_ep->com.local_addr)->sin6_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}

	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = tos;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	pr_debug("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
		 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid,
			 child_ep->com.local_addr.ss_family);
	insert_ep_tid(child_ep);
	if (accept_cr(child_ep, skb, req)) {
		c4iw_put_ep(&parent_ep->com);
		release_ep_resources(child_ep);
	} else {
		set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
	}
	if (iptype == 6) {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	goto out;
fail:
	c4iw_put_ep(&child_ep->com);
reject:
	reject_cr(dev, hwtid, skb);
out:
	if (parent_ep)
		c4iw_put_ep(&parent_ep->com);
	return 0;
}
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	int ret;

	ep = get_ep_from_tid(dev, tid);
	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	pr_debug("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
		 ntohs(req->tcp_opt));

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	mutex_lock(&ep->com.mutex);
	ep->com.state = MPA_REQ_WAIT;
	start_ep_timer(ep);
	set_bit(PASS_ESTAB, &ep->com.history);
	ret = send_flowc(ep);
	mutex_unlock(&ep->com.mutex);
	if (ret)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);

	return 0;
}
static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	unsigned int tid = GET_TID(hdr);
	int ret;

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	set_bit(PEER_CLOSE, &ep->com.history);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -ECONNRESET) {
			peer_close_upcall(ep);
			disconnect = 1;
		}
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
	return 0;
}
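/*
 * Handle an abort from the peer.  Apart from negative advice, this
 * moves the QP to ERROR where one is bound, notifies the ULP, sends
 * the mandatory abort reply, and either releases the endpoint or
 * re-connects with MPA v1 when an MPA-version fallback is pending.
 */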
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned int tid = GET_TID(req);
	u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;

	if (cxgb_is_neg_adv(req->status)) {
		pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
			 __func__, ep->hwtid, req->status,
			 neg_adv_str(req->status));
		ep->stats.abort_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		goto deref_ep;
	}
	pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
		 ep->com.state);
	set_bit(PEER_ABORT, &ep->com.history);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if com state is just
	 * MPA_REQ_SENT
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		c4iw_put_ep(&ep->parent_ep->com);
		break;
	case MPA_REQ_WAIT:
		(void)stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		(void)stop_ep_timer(ep);
		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * we just don't send notification upwards because we
			 * want to retry with mpa_v1 without upper layers even
			 * knowing it.
			 *
			 * do some housekeeping so as to re-initiate the
			 * connection
			 */
			pr_debug("%s: mpa_rev=%d. Retrying with mpav1\n",
				 __func__, mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case MPA_REP_SENT:
		break;
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				pr_err("%s - qp <- error failed!\n", __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		pr_debug("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		goto deref_ep;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
	if (WARN_ON(!rpl_skb)) {
		release = 1;
		goto out;
	}

	cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx);

	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	else if (ep->retry_with_mpa_v1) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;
			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
				 ep->com.local_addr.ss_family);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_reconnect(ep);
	}

deref_ep:
	c4iw_put_ep(&ep->com);
	/* Dereferencing ep, referenced in peer_abort_intr() */
	c4iw_put_ep(&ep->com);
	return 0;
}
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	set_bit(CLOSE_CON_RPL, &ep->com.history);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
	return 0;
}
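/*
 * A TERMINATE from the peer moves the associated QP into the TERMINATE
 * state; the endpoint itself is torn down by the abort or close that
 * follows.
 */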
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	unsigned int tid = GET_TID(rpl);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;

	ep = get_ep_from_tid(dev, tid);
	if (ep && ep->com.qp) {
		pr_warn("TERM received tid %u qpid %u\n",
			tid, ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		pr_warn("TERM received tid %u no ep/qp\n", tid);
	c4iw_put_ep(&ep->com);

	return 0;
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us its just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);


	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;
	pr_debug("%s ep %p tid %u credits %u\n",
		 __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		pr_debug("%s 0 credit ack ep %p tid %u state %u\n",
			 __func__, ep, ep->hwtid, state_read(&ep->com));
		goto out;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		pr_debug("%s last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
			 __func__, ep, ep->hwtid,
			 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		mutex_lock(&ep->com.mutex);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
		if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
			stop_ep_timer(ep);
		mutex_unlock(&ep->com.mutex);
	}
out:
	c4iw_put_ep(&ep->com);
	return 0;
}
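/*
 * ULP rejected the connection request: send an MPA reject (or just
 * abort outright when MPA negotiation is disabled) and disconnect.
 */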
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int abort;
	struct c4iw_ep *ep = to_ep(cm_id);

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state != MPA_REQ_RCVD) {
		mutex_unlock(&ep->com.mutex);
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	set_bit(ULP_REJECT, &ep->com.history);
	if (mpa_rev == 0)
		abort = 1;
	else
		abort = send_mpa_reject(ep, pdata, pdata_len);
	mutex_unlock(&ep->com.mutex);

	stop_ep_timer(ep);
	c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}
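/*
 * ULP accepted the connection request.  Negotiate final IRD/ORD values
 * (strictly for MPA v2 enhanced mode unless relaxed negotiation is
 * allowed), bind the QP to the endpoint, move it to RTS and send the
 * MPA reply.
 */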
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
	int abort = 0;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state != MPA_REQ_RCVD) {
		err = -ECONNRESET;
		goto err_out;
	}

	BUG_ON(!qp);

	set_bit(ULP_ACCEPT, &ep->com.history);
	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
		err = -EINVAL;
		goto err_abort;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		if (conn_param->ord > ep->ird) {
			if (RELAXED_IRD_NEGOTIATION) {
				conn_param->ord = ep->ird;
			} else {
				ep->ird = conn_param->ird;
				ep->ord = conn_param->ord;
				send_mpa_reject(ep, conn_param->private_data,
						conn_param->private_data_len);
				err = -ENOMEM;
				goto err_abort;
			}
		}
		if (conn_param->ird < ep->ord) {
			if (RELAXED_IRD_NEGOTIATION &&
			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
				conn_param->ird = ep->ord;
			} else {
				err = -ENOMEM;
				goto err_abort;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version == 1) {
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;
	} else {
		if (peer2peer &&
		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
			ep->ird = 1;
	}

	pr_debug("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.qp = qp;
	ref_qp(ep);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err_deref_cm_id;

	set_bit(STOP_MPA_TIMER, &ep->com.flags);
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err_deref_cm_id;

	__state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return 0;
err_deref_cm_id:
	deref_cm_id(&ep->com);
err_abort:
	abort = 1;
err_out:
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return err;
}
static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in_device *ind;
	int found = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;

	ind = in_dev_get(dev->rdev.lldi.ports[0]);
	if (!ind)
		return -EADDRNOTAVAIL;
	for_primary_ifa(ind) {
		laddr->sin_addr.s_addr = ifa->ifa_address;
		raddr->sin_addr.s_addr = ifa->ifa_address;
		found = 1;
		break;
	}
	endfor_ifa(ind);
	in_dev_put(ind);
	return found ? 0 : -EADDRNOTAVAIL;
}
static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
		      unsigned char banned_flags)
{
	struct inet6_dev *idev;
	int err = -EADDRNOTAVAIL;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev != NULL) {
		struct inet6_ifaddr *ifp;

		read_lock_bh(&idev->lock);
		list_for_each_entry(ifp, &idev->addr_list, if_list) {
			if (ifp->scope == IFA_LINK &&
			    !(ifp->flags & banned_flags)) {
				memcpy(addr, &ifp->addr, 16);
				err = 0;
				break;
			}
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return err;
}
static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in6_addr uninitialized_var(addr);
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;

	if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
		memcpy(la6->sin6_addr.s6_addr, &addr, 16);
		memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
		return 0;
	}
	return -EADDRNOTAVAIL;
}
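/*
 * Active-side connection setup: allocate an endpoint and atid, resolve
 * local addresses for loopback connects to INADDR_ANY, find a route,
 * import the L2 info and fire off the connect request.
 */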
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	int err = 0;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	__u8 *ra;
	int iptype;

	if ((conn_param->ord > cur_max_read_depth(dev)) ||
	    (conn_param->ird > cur_max_read_depth(dev))) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		pr_err("%s - cannot alloc ep\n", __func__);
		err = -ENOMEM;
		goto out;
	}

	skb_queue_head_init(&ep->com.ep_skb_list);
	if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
		err = -ENOMEM;
		goto fail1;
	}

	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.dev = dev;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	if (!ep->com.qp) {
		pr_debug("%s qpn 0x%x not found!\n", __func__,
			 conn_param->qpn);
		err = -EINVAL;
		goto fail2;
	}
	ref_qp(ep);
	pr_debug("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
		 ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->atid_idr, ep, ep->atid);

	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
	       sizeof(ep->com.remote_addr));

	laddr = (struct sockaddr_in *)&ep->com.local_addr;
	raddr = (struct sockaddr_in *)&ep->com.remote_addr;
	laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;

	if (cm_id->m_remote_addr.ss_family == AF_INET) {
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
			err = pick_local_ipaddrs(dev, cm_id);
			if (err)
				goto fail2;
		}

		/* find a route */
		pr_debug("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
			 __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
			 ra, ntohs(raddr->sin_port));
		ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
					  laddr->sin_addr.s_addr,
					  raddr->sin_addr.s_addr,
					  laddr->sin_port,
					  raddr->sin_port, cm_id->tos);
	} else {
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
			err = pick_local_ip6addrs(dev, cm_id);
			if (err)
				goto fail2;
		}

		/* find a route */
		pr_debug("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
			 __func__, laddr6->sin6_addr.s6_addr,
			 ntohs(laddr6->sin6_port),
			 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
		ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
					   laddr6->sin6_addr.s6_addr,
					   raddr6->sin6_addr.s6_addr,
					   laddr6->sin6_port,
					   raddr6->sin6_port, 0,
					   raddr6->sin6_scope_id);
	}
	if (!ep->dst) {
		pr_err("%s - cannot find route\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}

	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
			ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
	if (err) {
		pr_err("%s - cannot alloc l2e\n", __func__);
		goto fail4;
	}

	pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
		 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
		 ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = cm_id->tos;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	skb_queue_purge(&ep->com.ep_skb_list);
	deref_cm_id(&ep->com);
fail1:
	c4iw_put_ep(&ep->com);
out:
	return err;
}
static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				    &ep->com.local_addr;

	if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
		err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		if (err)
			return err;
	}
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
				   ep->stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (!err)
		err = c4iw_wait_for_reply(&ep->com.dev->rdev,
					  &ep->com.wr_wait,
					  0, 0, __func__);
	else if (err > 0)
		err = net_xmit_errno(err);
	if (err) {
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
		       err, ep->stid,
		       sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
	}
	return err;
}
static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in *sin = (struct sockaddr_in *)
				  &ep->com.local_addr;

	if (dev->rdev.lldi.enable_fw_ofld_conn) {
		do {
			err = cxgb4_create_server_filter(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				sin->sin_addr.s_addr, sin->sin_port, 0,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
			if (err == -EBUSY) {
				if (c4iw_fatal_error(&ep->com.dev->rdev)) {
					err = -EIO;
					break;
				}
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(usecs_to_jiffies(100));
			}
		} while (err == -EBUSY);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
				ep->stid, sin->sin_addr.s_addr, sin->sin_port,
				0, ep->com.dev->rdev.lldi.rxq_ids[0]);
		if (!err)
			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
						  &ep->com.wr_wait,
						  0, 0, __func__);
		else if (err > 0)
			err = net_xmit_errno(err);
	}
	if (err)
		pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
		       , err, ep->stid,
		       &sin->sin_addr, ntohs(sin->sin_port));
	return err;
}
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		pr_err("%s - cannot alloc ep\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	skb_queue_head_init(&ep->com.ep_skb_list);
	pr_debug("%s ep %p\n", __func__, ep);
	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.dev = dev;
	ep->backlog = backlog;
	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));

	/*
	 * Allocate a server TID.
	 */
	if (dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET)
		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
					     cm_id->m_local_addr.ss_family, ep);
	else
		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
					    cm_id->m_local_addr.ss_family, ep);

	if (ep->stid == -1) {
		pr_err("%s - cannot alloc stid\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->stid_idr, ep, ep->stid);

	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));

	state_set(&ep->com, LISTEN);
	if (ep->com.local_addr.ss_family == AF_INET)
		err = create_server4(dev, ep);
	else
		err = create_server6(dev, ep);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
fail2:
	deref_cm_id(&ep->com);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	pr_debug("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET) {
		err = cxgb4_remove_server_filter(
			ep->com.dev->rdev.lldi.ports[0], ep->stid,
			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
	} else {
		struct sockaddr_in6 *sin6;
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_remove_server(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0);
		if (err)
			goto done;
		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
					  0, 0, __func__);
		sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
done:
	deref_cm_id(&ep->com);
	c4iw_put_ep(&ep->com);
	return err;
}
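/*
 * Initiate a close or abort of the connection, depending on @abrupt and
 * the current endpoint state.  On fatal adapter errors or failed sends
 * the endpoint resources are released immediately.
 */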
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	pr_debug("%s ep %p state %s, abrupt %d\n", __func__, ep,
		 states[ep->com.state], abrupt);

	/*
	 * Ref the ep here in case we have fatal errors causing the
	 * ep to be released and freed.
	 */
	c4iw_get_ep(&ep->com);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep, -EIO);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
	case CONNECTING:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;

			/*
			 * if we close before we see the fw4_ack() then we fix
			 * up the timer state since we're reusing it.
			 */
			if (ep->mpa_skb &&
			    test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
				clear_bit(STOP_MPA_TIMER, &ep->com.flags);
				stop_ep_timer(ep);
			}
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				(void)stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		pr_debug("%s ignoring disconnect ep %p state %u\n",
			 __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	if (close) {
		if (abrupt) {
			set_bit(EP_DISC_ABORT, &ep->com.history);
			close_complete_upcall(ep, -ECONNRESET);
			ret = send_abort(ep);
		} else {
			set_bit(EP_DISC_CLOSE, &ep->com.history);
			ret = send_halfclose(ep);
		}
		if (ret) {
			set_bit(EP_DISC_FAIL, &ep->com.history);
			if (!abrupt) {
				stop_ep_timer(ep);
				close_complete_upcall(ep, -EIO);
			}
			if (ep->com.qp) {
				struct c4iw_qp_attributes attrs;

				attrs.next_state = C4IW_QP_STATE_ERROR;
				ret = c4iw_modify_qp(ep->com.qp->rhp,
						     ep->com.qp,
						     C4IW_QP_ATTR_NEXT_STATE,
						     &attrs, 1);
				if (ret)
					pr_err("%s - qp <- error failed!\n",
					       __func__);
			}
			fatal = 1;
		}
	}
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct c4iw_ep *ep;
	int atid = be32_to_cpu(req->tid);

	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
					   (__force u32) req->tid);
	if (!ep)
		return;

	switch (req->retval) {
	case FW_ENOMEM:
		set_bit(ACT_RETRY_NOMEM, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		/* fall through */
	case FW_EADDRINUSE:
		set_bit(ACT_RETRY_INUSE, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		break;
	default:
		pr_info("%s unexpected ofld conn wr retval %d\n",
			__func__, req->retval);
		break;
	}
	pr_err("active ofld_connect_wr failure %d atid %d\n",
	       req->retval, atid);
	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.act_ofld_conn_fails++;
	mutex_unlock(&dev->rdev.stats.lock);
	connect_reply_upcall(ep, status2errno(req->retval));
	state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(dev, &dev->atid_idr, atid);
	cxgb4_free_atid(dev->rdev.lldi.tids, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}
static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct sk_buff *rpl_skb;
	struct cpl_pass_accept_req *cpl;
	int ret;

	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
	BUG_ON(!rpl_skb);
	if (req->retval) {
		pr_debug("%s passive open failure %d\n", __func__, req->retval);
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.pas_ofld_conn_fails++;
		mutex_unlock(&dev->rdev.stats.lock);
		kfree_skb(rpl_skb);
	} else {
		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
					(__force u32) htonl(
					(__force u32) req->tid)));
		ret = pass_accept_req(dev, rpl_skb);
		if (!ret)
			kfree_skb(rpl_skb);
	}
}
static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;

	switch (rpl->type) {
	case FW6_TYPE_CQE:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
		switch (req->t_state) {
		case TCP_SYN_SENT:
			active_ofld_conn_reply(dev, skb, req);
			break;
		case TCP_SYN_RECV:
			passive_ofld_conn_reply(dev, skb, req);
			break;
		default:
			pr_err("%s unexpected ofld conn wr state %d\n",
			       __func__, req->t_state);
			break;
		}
		break;
	}
	return 0;
}
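/*
 * Rebuild a cpl_pass_accept_req in place from the cpl_rx_pkt carrying a
 * filter-redirected SYN, parsing the TCP options out of the packet
 * itself, so the result can be fed through the normal passive-open path
 * once firmware supplies a TID.
 */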
static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
{
	u32 l2info;
	__be16 hdr_len, vlantag, len;
	u16 eth_hdr_len;
	int tcp_hdr_len, ip_hdr_len;
	u8 intf;
	struct cpl_rx_pkt *cpl = cplhdr(skb);
	struct cpl_pass_accept_req *req;
	struct tcp_options_received tmp_opt;
	struct c4iw_dev *dev;
	enum chip_type type;

	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
	/* Store values from cpl_rx_pkt in temporary location. */
	vlantag = cpl->vlan;
	len = cpl->len;
	l2info = cpl->l2info;
	hdr_len = cpl->hdr_len;
	intf = cpl->iff;

	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));

	/*
	 * We need to parse the TCP options from SYN packet.
	 * to generate cpl_pass_accept_req.
	 */
	memset(&tmp_opt, 0, sizeof(tmp_opt));
	tcp_clear_options(&tmp_opt);
	tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL);

	req = __skb_push(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
			 SYN_MAC_IDX_V(RX_MACIDX_G(
			 be32_to_cpu(l2info))) |
			 SYN_XACT_MATCH_F);
	type = dev->rdev.lldi.adapter_type;
	tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
	ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
	req->hdr_len =
		cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
	if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
		eth_hdr_len = is_t4(type) ?
				RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
				RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
					    IP_HDR_LEN_V(ip_hdr_len) |
					    ETH_HDR_LEN_V(eth_hdr_len));
	} else { /* T6 and later */
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
					    T6_IP_HDR_LEN_V(ip_hdr_len) |
					    T6_ETH_HDR_LEN_V(eth_hdr_len));
	}
	req->vlan = vlantag;
	req->len = len;
	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
				    PASS_OPEN_TOS_V(tos));
	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
	if (tmp_opt.wscale_ok)
		req->tcpopt.wsf = tmp_opt.snd_wscale;
	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
	if (tmp_opt.sack_ok)
		req->tcpopt.sack = 1;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
}
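/*
 * Ask firmware (via FW_OFLD_CONNECTION_WR) to complete the passive open
 * for a filter-redirected SYN.  The original skb is stashed in the
 * cookie so the reply handler can resubmit it as a regular
 * cpl_pass_accept_req.
 */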
static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
				  __be32 laddr, __be16 lport,
				  __be32 raddr, __be16 rport,
				  u32 rcv_isn, u32 filter, u16 window,
				  u32 rss_qid, u8 port_id)
{
	struct sk_buff *req_skb;
	struct fw_ofld_connection_wr *req;
	struct cpl_pass_accept_req *cpl = cplhdr(skb);
	int ret;

	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
	if (!req_skb)
		return;
	req = __skb_put_zero(req_skb, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
	req->le.filter = (__force __be32) filter;
	req->le.lport = lport;
	req->le.pport = rport;
	req->le.u.ipv4.lip = laddr;
	req->le.u.ipv4.pip = raddr;
	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
	req->tcb.rcv_adv = htons(window);
	req->tcb.t_state_to_astid =
		 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
			FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
			FW_OFLD_CONNECTION_WR_ASTID_V(
			PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));

	/*
	 * We store the qid in opt2 which will be used by the firmware
	 * to send us the wr response.
	 */
	req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));

	/*
	 * We initialize the MSS index in TCB to 0xF.
	 * So that when driver sends cpl_pass_accept_rpl
	 * TCB picks up the correct value. If this was 0
	 * TP will ignore any value > 0 for MSS index.
	 */
	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
	req->cookie = (uintptr_t)skb;

	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
	if (ret < 0) {
		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
		       ret);
		kfree_skb(skb);
		kfree_skb(req_skb);
	}
}
/*
 * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt
 * messages when a filter is being used instead of server to
 * redirect a syn packet. When packets hit filter they are redirected
 * to the offload queue and driver tries to establish the connection
 * using firmware work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int stid;
	unsigned int filter;
	struct ethhdr *eh = NULL;
	struct vlan_ethhdr *vlan_eh = NULL;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct rss_header *rss = (void *)skb->data;
	struct cpl_rx_pkt *cpl = (void *)skb->data;
	struct cpl_pass_accept_req *req = (void *)(rss + 1);
	struct l2t_entry *e;
	struct dst_entry *dst;
	struct c4iw_ep *lep = NULL;
	u16 window;
	struct port_info *pi;
	struct net_device *pdev;
	u16 rss_qid, eth_hdr_len;
	int step;
	u32 tx_chan;
	struct neighbour *neigh;

	/* Drop all non-SYN packets */
	if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
		goto reject;

	/*
	 * Drop all packets which did not hit the filter.
	 * Unlikely to happen.
	 */
	if (!(rss->filter_hit && rss->filter_tid))
		goto reject;

	/*
	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
	 */
	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);

	lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
	if (!lep) {
		pr_debug("%s connect request on invalid stid %d\n",
			 __func__, stid);
		goto reject;
	}

	switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
	case CHELSIO_T4:
		eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T5:
		eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T6:
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
		goto reject;
	}

	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		skb->vlan_tci = ntohs(cpl->vlan);
	}

	if (iph->version != 0x4)
		goto reject;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)rss);
	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
	skb_get(skb);

	pr_debug("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
		 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
		 ntohs(tcph->source), iph->tos);

	dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
			      iph->daddr, iph->saddr, tcph->dest,
			      tcph->source, iph->tos);
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	neigh = dst_neigh_lookup_skb(dst, skb);

	if (!neigh) {
		pr_err("%s - failed to allocate neigh!\n",
		       __func__);
		goto free_dst;
	}

	if (neigh->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, iph->daddr);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
		dev_put(pdev);
	} else {
		pdev = get_real_dev(neigh->dev);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
	}
	neigh_release(neigh);
	if (!e) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		goto free_dst;
	}

	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
	window = (__force u16) htons((__force u16)tcph->window);

	/* Calculate filter portion for LE region. */
	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
						    dev->rdev.lldi.ports[0],
						    e));

	/*
	 * Synthesize the cpl_pass_accept_req. We have everything except the
	 * TID. Once firmware sends a reply with TID we update the TID field
	 * in cpl and pass it through the regular cpl_pass_accept_req path.
	 */
	build_cpl_pass_accept_req(skb, stid, iph->tos);
	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
			      tcph->source, ntohl(tcph->seq), filter, window,
			      rss_qid, pi->port_id);
	cxgb4_l2t_release(e);
free_dst:
	dst_release(dst);
reject:
	if (lep)
		c4iw_put_ep(&lep->com);
	return 0;
}
/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt,
	[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
	[FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
};
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	pr_debug("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
		 ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:
		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}
static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		tmp->next = NULL;
		tmp->prev = NULL;
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	process_timedout_eps();
	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
		process_timedout_eps();
	}
}

static DECLARE_WORK(skb_work, process_work);
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	spin_lock(&timeout_lock);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		/*
		 * Only insert if it is not already on the list.
		 */
		if (!ep->entry.next) {
			list_add_tail(&ep->entry, &timeout_list);
			kickit = 1;
		}
	}
	spin_unlock(&timeout_lock);
	if (kickit)
		queue_work(workq, &skb_work);
}
/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
		       rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}
static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	pr_debug("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		pr_debug("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_CQE:
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		sched(dev, skb);
		break;
	default:
		pr_err("%s unexpected fw6 msg type %u\n",
		       __func__, rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}
static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	unsigned int tid = GET_TID(req);

	ep = get_ep_from_tid(dev, tid);
	/* This EP will be dereferenced in peer_abort() */
	if (!ep) {
		pr_warn("Abort on non-existent endpoint, tid %d\n", tid);
		kfree_skb(skb);
		return 0;
	}
	if (cxgb_is_neg_adv(req->status)) {
		pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
			 __func__, ep->hwtid, req->status,
			 neg_adv_str(req->status));
		goto out;
	}
	pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
		 ep->com.state);

	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
out:
	sched(dev, skb);
	return 0;
}
/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg,
	[CPL_RX_PKT] = sched
};
int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
	if (!workq)
		return -ENOMEM;

	return 0;
}

void c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}