/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int peer2peer = 0;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0644);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");

static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
static DECLARE_WORK(skb_work, process_work);

static struct sk_buff_head rxq;
static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);
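
/*
 * Deferred-work model: CPL messages from the T3 core arrive in an
 * atomic context, so sched() only stashes the endpoint and t3cdev in
 * skb->cb and queues the skb on rxq; process_work() later runs the
 * matching work_handlers[] entry from the workqueue, where sleeping
 * is allowed.
 */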

static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	}
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		printk(KERN_ERR "%s timer stopped when it's not running! ep %p state %u\n",
		       __func__, ep, ep->com.state);
	}
	del_timer_sync(&ep->timer);
}
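
/*
 * Timer lifetime: start_ep_timer() silently rearms an already-pending
 * timer, while stop_ep_timer() warns if the timer is not running.  The
 * timer fires ep_timeout() after ep_timeout_secs seconds and aborts
 * whatever CM exchange was still in flight.
 */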

int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = l2t_send(tdev, skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = cxgb3_ofld_send(tdev, skb);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_cxgb3_ofld_send(tdev, skb);
	return;
}

int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
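
/*
 * iwch_quiesce_tid() and iwch_resume_tid() form a pair: both issue a
 * CPL_SET_TCB_FIELD against the W_TCB_RX_QUIESCE word with the same
 * one-bit mask; quiesce writes the bit as 1 to pause RX on the TID,
 * resume writes it back to 0.
 */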

static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __func__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	PDBG("emss=%d\n", ep->emss);
}
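
/*
 * Worked example: a 1500-byte entry in the adapter's MTU table yields
 * emss = 1500 - 40 = 1460 (IP + TCP headers), and 1448 when the peer
 * negotiated TCP timestamps (12 more option bytes per segment).
 */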

static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void __free_ep(struct kref *kref)
{
	struct iwch_ep *ep;

	ep = container_of(container_of(kref, struct iwch_ep_common, kref),
			  struct iwch_ep, com);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (ep->com.flags & RELEASE_RESOURCES) {
		cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	ep->com.flags |= RELEASE_RESOURCES;
	put_ep(&ep->com);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}
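
/*
 * get_skb() callers pass in a candidate skb (often the one just
 * received): if it is linear and not cloned it is reused, otherwise a
 * fresh skb of the requested length is allocated.  Either way the
 * caller ends up owning a reference on the returned skb.
 */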

static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			  }
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;
	return rt;
}

static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}
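
/*
 * find_best_mtu() returns the index of the largest MTU-table entry
 * that does not exceed the path MTU; send_connect() and accept_cr()
 * feed that index to V_MSS_IDX().
 */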

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __func__, dev);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __func__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(dev, skb);
}

static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);

	ep->mpa_skb = skb;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}
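
/*
 * MPA negotiation in a nutshell: the active side sends an MPA request
 * (send_mpa_req) once the TCP connection is up, the passive side
 * answers with a reply (send_mpa_reply) or a reject (send_mpa_reject),
 * and each side moves to FPDU_MODE once a valid, non-reject exchange
 * has been consumed.
 */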

static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);

	ep->mpa_skb = skb;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}

static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __func__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}

static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __func__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}

static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __func__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD)
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	iwch_cxgb3_ofld_send(ep->com.tdev, skb);
	return credits;
}
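
/*
 * rx_data() calls update_rx_credits() with the number of bytes it just
 * consumed from the TCP stream; the CPL_RX_DATA_ACK returns that much
 * receive window to the adapter and requests an immediate ACK via
 * V_RX_FORCE_ACK(1).
 */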

static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.initiator = 1;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
		iwch_post_zb_read(ep->com.qp);
	}

	goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);

	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p state %u\n",
		     __func__, ep, state_read(&ep->com));
		return CPL_RET_BUF_DONE;
	}

	BUG_ON(credits != 1);
	dst_confirm(ep->dst);
	if (!ep->mpa_skb) {
		PDBG("%s rdma_init wr_ack ep %p state %u\n",
		     __func__, ep, state_read(&ep->com));
		if (ep->mpa_attr.initiator) {
			PDBG("%s initiator ep %p state %u\n",
			     __func__, ep, state_read(&ep->com));
			iwch_post_zb_read(ep->com.qp);
		} else {
			PDBG("%s responder ep %p state %u\n",
			     __func__, ep, state_read(&ep->com));
			ep->com.rpl_done = 1;
			wake_up(&ep->com.waitq);
		}
	} else {
		PDBG("%s lsm ack ep %p state %u freeing skb\n",
		     __func__, ep, state_read(&ep->com));
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return CPL_RET_BUF_DONE;
}

static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * We get 2 abort replies from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) {
		ep->com.flags |= ABORT_REQ_IN_PROGRESS;
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case ABORTING:
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}
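
/*
 * TID lifecycle on the active side: iwch_connect() allocates an atid
 * to start the open; on success act_establish() switches the
 * connection to its hardware tid and frees the atid, on failure
 * act_open_rpl() frees the atid (and the hwtid too, when one was
 * actually allocated).
 */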

static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = CPL_PRIORITY_SETUP;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return CPL_RET_BUF_DONE;
}

static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = CPL_PRIORITY_SETUP;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}

static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type != T3A)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		iwch_cxgb3_ofld_send(tdev, skb);
	}
}

static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR MOD
		       "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
		       __func__,
		       req->dst_mac[0], req->dst_mac[1], req->dst_mac[2],
		       req->dst_mac[3], req->dst_mac[4], req->dst_mac[5]);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->u.dst;
	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		l2t_release(L2DATA(tdev), l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}

static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}

static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
1536 static int is_neg_adv_abort(unsigned int status
)
1538 return status
== CPL_ERR_RTX_NEG_ADVICE
||
1539 status
== CPL_ERR_PERSIST_NEG_ADVICE
;
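
/*
 * Negative advice (retransmit/persist-timer advice) means the
 * connection is struggling, not gone; peer_abort() responds by nudging
 * the L2 entry with t3_l2t_send_event() instead of tearing the
 * endpoint down.
 */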

static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned long flags;

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	/*
	 * We get 2 peer aborts from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) {
		ep->com.flags |= PEER_ABORT_IN_PROGRESS;
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/* fall through */
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
					     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
					     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		spin_unlock_irqrestore(&ep->com.lock, flags);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		release = 1;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		goto out;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for later consumer consumption.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	if (state_read(&ep->com) != FPDU_MODE)
		return CPL_RET_BUF_DONE;

	PDBG("%s ep %p\n", __func__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}

static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __func__, ep->hwtid);
		stop_ep_timer(ep);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}

static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		printk(KERN_ERR "%s unexpected state ep %p state %u\n",
		       __func__, ep, ep->com.state);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	abort_connection(ep, NULL, GFP_ATOMIC);
}

int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct iwch_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	put_ep(&ep->com);
	return 0;
}

int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD)
		return -ECONNRESET;

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		return -EINVAL;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ird == 0)
		ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	       IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD |
	       IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/* if needed, wait for wr_ack */
	if (iwch_rqes_posted(qp)) {
		wait_event(ep->com.waitq, ep->com.rpl_done);
		err = ep->com.rpl_err;
		if (err)
			goto err;
	}

	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	put_ep(&ep->com);
	return 0;
err:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}

static int is_loopback_dst(struct iw_cm_id *cm_id)
{
	struct net_device *dev;

	dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
	if (!dev)
		return 0;
	dev_put(dev);
	return 1;
}

int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;

	if (is_loopback_dst(cm_id)) {
		err = -ENOSYS;
		goto out;
	}

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->u.dst;

	/* get a l2t entry */
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
			     ep->dst->neighbour->dev);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
out:
	return err;
}

int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
fail1:
out:
	return err;
}

int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	if (err)
		goto done;
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
done:
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}

int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	unsigned long flags;
	struct t3cdev *tdev;
	struct cxio_rdev *rdev;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	tdev = (struct t3cdev *)ep->com.tdev;
	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		break;
	case CLOSING:
		close = 1;
		if (abrupt) {
			stop_ep_timer(ep);
			ep->com.state = ABORTING;
		} else
			ep->com.state = MORIBUND;
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}

int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
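
/*
 * Reference pairing: the get_ep() taken in sched() is dropped by the
 * put_ep() at the bottom of process_work(), so an endpoint cannot be
 * freed while one of its CPL messages is still queued on rxq.
 */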

static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	return CPL_RET_BUF_DONE;
}

int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb3");
	if (!workq)
		return -ENOMEM;

	/*
	 * All upcalls from the T3 Core go to sched() to
	 * schedule the processing on a work queue.
	 */
	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
	t3c_handlers[CPL_RX_DATA] = sched;
	t3c_handlers[CPL_TX_DMA_ACK] = sched;
	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
	t3c_handlers[CPL_ABORT_RPL] = sched;
	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
	t3c_handlers[CPL_PEER_CLOSE] = sched;
	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
	t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;

	/*
	 * These are the real handlers that are called from a
	 * work queue.
	 */
	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
	work_handlers[CPL_RX_DATA] = rx_data;
	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
	work_handlers[CPL_ABORT_RPL] = abort_rpl;
	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
	work_handlers[CPL_PEER_CLOSE] = peer_close;
	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
	work_handlers[CPL_RDMA_TERMINATE] = terminate;
	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
	return 0;
}

void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}