Commit | Line | Data |
---|---|---|
cfdda9d7 SW |
1 | /* |
2 | * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | #include <linux/module.h> | |
33 | #include <linux/list.h> | |
34 | #include <linux/workqueue.h> | |
35 | #include <linux/skbuff.h> | |
36 | #include <linux/timer.h> | |
37 | #include <linux/notifier.h> | |
38 | #include <linux/inetdevice.h> | |
39 | #include <linux/ip.h> | |
40 | #include <linux/tcp.h> | |
41 | ||
42 | #include <net/neighbour.h> | |
43 | #include <net/netevent.h> | |
44 | #include <net/route.h> | |
45 | ||
46 | #include "iw_cxgb4.h" | |
47 | ||
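/*
 * Human-readable names for enum c4iw_ep_state, indexed by state value
 * and used only for PDBG debug logging.
 */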
48 | static char *states[] = { | |
49 | "idle", | |
50 | "listen", | |
51 | "connecting", | |
52 | "mpa_wait_req", | |
53 | "mpa_req_sent", | |
54 | "mpa_req_rcvd", | |
55 | "mpa_rep_sent", | |
56 | "fpdu_mode", | |
57 | "aborting", | |
58 | "closing", | |
59 | "moribund", | |
60 | "dead", | |
61 | NULL, | |
62 | }; | |
63 | ||
b52fe09e | 64 | static int dack_mode = 1; |
ba6d3925 | 65 | module_param(dack_mode, int, 0644); |
b52fe09e | 66 | MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)"); |
ba6d3925 | 67 | |
be4c9bad RD |
68 | int c4iw_max_read_depth = 8; |
69 | module_param(c4iw_max_read_depth, int, 0644); | |
70 | MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)"); | |
71 | ||
cfdda9d7 SW |
72 | static int enable_tcp_timestamps; |
73 | module_param(enable_tcp_timestamps, int, 0644); | |
74 | MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); | |
75 | ||
76 | static int enable_tcp_sack; | |
77 | module_param(enable_tcp_sack, int, 0644); | |
78 | MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)"); | |
79 | ||
80 | static int enable_tcp_window_scaling = 1; | |
81 | module_param(enable_tcp_window_scaling, int, 0644); | |
82 | MODULE_PARM_DESC(enable_tcp_window_scaling, | |
83 | "Enable tcp window scaling (default=1)"); | |
84 | ||
85 | int c4iw_debug; | |
86 | module_param(c4iw_debug, int, 0644); | |
87 | MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)"); | |
88 | ||
89 | static int peer2peer; | |
90 | module_param(peer2peer, int, 0644); | |
91 | MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)"); | |
92 | ||
93 | static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; | |
94 | module_param(p2p_type, int, 0644); | |
95 | MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: " | |
96 | "1=RDMA_READ 0=RDMA_WRITE (default 1)"); | |
97 | ||
98 | static int ep_timeout_secs = 60; | |
99 | module_param(ep_timeout_secs, int, 0644); | |
100 | MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " | |
101 | "in seconds (default=60)"); | |
102 | ||
103 | static int mpa_rev = 1; | |
104 | module_param(mpa_rev, int, 0644); | |
105 | MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " | |
106 | "1 is spec compliant. (default=1)"); | |
107 | ||
108 | static int markers_enabled; | |
109 | module_param(markers_enabled, int, 0644); | |
110 | MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); | |
111 | ||
112 | static int crc_enabled = 1; | |
113 | module_param(crc_enabled, int, 0644); | |
114 | MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); | |
115 | ||
116 | static int rcv_win = 256 * 1024; | |
117 | module_param(rcv_win, int, 0644); | |
118 | MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); | |
119 | ||
98ae68b7 | 120 | static int snd_win = 128 * 1024; |
cfdda9d7 | 121 | module_param(snd_win, int, 0644); |
98ae68b7 | 122 | MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)"); |
cfdda9d7 | 123 | |
cfdda9d7 | 124 | static struct workqueue_struct *workq; |
cfdda9d7 SW |
125 | |
126 | static struct sk_buff_head rxq; | |
cfdda9d7 SW |
127 | |
128 | static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); | |
129 | static void ep_timeout(unsigned long arg); | |
130 | static void connect_reply_upcall(struct c4iw_ep *ep, int status); | |
131 | ||
be4c9bad RD |
132 | static LIST_HEAD(timeout_list); |
133 | static spinlock_t timeout_lock; | |
134 | ||
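/*
 * Arm/disarm the per-endpoint CM timer (ep_timeout_secs). A running
 * timer holds a reference on the endpoint: start_ep_timer() takes it
 * (unless it is only restarting a pending timer) and stop_ep_timer()
 * drops it.
 */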
cfdda9d7 SW |
135 | static void start_ep_timer(struct c4iw_ep *ep) |
136 | { | |
137 | PDBG("%s ep %p\n", __func__, ep); | |
138 | if (timer_pending(&ep->timer)) { | |
139 | PDBG("%s stopped / restarted timer ep %p\n", __func__, ep); | |
140 | del_timer_sync(&ep->timer); | |
141 | } else | |
142 | c4iw_get_ep(&ep->com); | |
143 | ep->timer.expires = jiffies + ep_timeout_secs * HZ; | |
144 | ep->timer.data = (unsigned long)ep; | |
145 | ep->timer.function = ep_timeout; | |
146 | add_timer(&ep->timer); | |
147 | } | |
148 | ||
149 | static void stop_ep_timer(struct c4iw_ep *ep) | |
150 | { | |
151 | PDBG("%s ep %p\n", __func__, ep); | |
152 | if (!timer_pending(&ep->timer)) { | |
153 | printk(KERN_ERR "%s timer stopped when it's not running! " | |
154 | "ep %p state %u\n", __func__, ep, ep->com.state); | |
155 | WARN_ON(1); | |
156 | return; | |
157 | } | |
158 | del_timer_sync(&ep->timer); | |
159 | c4iw_put_ep(&ep->com); | |
160 | } | |
161 | ||
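/*
 * Low-level transmit helpers: both drop the skb and return -EIO if the
 * device is in a fatal error state, and both free the skb themselves on
 * a send failure, so callers never need to free it on error.
 */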
162 | static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, | |
163 | struct l2t_entry *l2e) | |
164 | { | |
165 | int error = 0; | |
166 | ||
167 | if (c4iw_fatal_error(rdev)) { | |
168 | kfree_skb(skb); | |
169 | PDBG("%s - device in error state - dropping\n", __func__); | |
170 | return -EIO; | |
171 | } | |
172 | error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); | |
173 | if (error < 0) | |
174 | kfree_skb(skb); | |
74594861 | 175 | return error < 0 ? error : 0; |
cfdda9d7 SW |
176 | } |
177 | ||
178 | int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) | |
179 | { | |
180 | int error = 0; | |
181 | ||
182 | if (c4iw_fatal_error(rdev)) { | |
183 | kfree_skb(skb); | |
184 | PDBG("%s - device in error state - dropping\n", __func__); | |
185 | return -EIO; | |
186 | } | |
187 | error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); | |
188 | if (error < 0) | |
189 | kfree_skb(skb); | |
74594861 | 190 | return error < 0 ? error : 0; |
cfdda9d7 SW |
191 | } |
192 | ||
193 | static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) | |
194 | { | |
195 | struct cpl_tid_release *req; | |
196 | ||
197 | skb = get_skb(skb, sizeof *req, GFP_KERNEL); | |
198 | if (!skb) | |
199 | return; | |
200 | req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); | |
201 | INIT_TP_WR(req, hwtid); | |
202 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); | |
203 | set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); | |
204 | c4iw_ofld_send(rdev, skb); | |
205 | return; | |
206 | } | |
207 | ||
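/*
 * Derive the effective MSS from the negotiated MTU index: subtract 40
 * bytes of IPv4+TCP headers, 12 more if TCP timestamps were negotiated,
 * and never go below 128 bytes.
 */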
208 | static void set_emss(struct c4iw_ep *ep, u16 opt) | |
209 | { | |
210 | ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40; | |
211 | ep->mss = ep->emss; | |
212 | if (GET_TCPOPT_TSTAMP(opt)) | |
213 | ep->emss -= 12; | |
214 | if (ep->emss < 128) | |
215 | ep->emss = 128; | |
216 | PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), | |
217 | ep->mss, ep->emss); | |
218 | } | |
219 | ||
220 | static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) | |
221 | { | |
cfdda9d7 SW |
222 | enum c4iw_ep_state state; |
223 | ||
2f5b48c3 | 224 | mutex_lock(&epc->mutex); |
cfdda9d7 | 225 | state = epc->state; |
2f5b48c3 | 226 | mutex_unlock(&epc->mutex); |
cfdda9d7 SW |
227 | return state; |
228 | } | |
229 | ||
230 | static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) | |
231 | { | |
232 | epc->state = new; | |
233 | } | |
234 | ||
235 | static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) | |
236 | { | |
2f5b48c3 | 237 | mutex_lock(&epc->mutex); |
cfdda9d7 SW |
238 | PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); |
239 | __state_set(epc, new); | |
2f5b48c3 | 240 | mutex_unlock(&epc->mutex); |
cfdda9d7 SW |
241 | return; |
242 | } | |
243 | ||
244 | static void *alloc_ep(int size, gfp_t gfp) | |
245 | { | |
246 | struct c4iw_ep_common *epc; | |
247 | ||
248 | epc = kzalloc(size, gfp); | |
249 | if (epc) { | |
250 | kref_init(&epc->kref); | |
2f5b48c3 | 251 | mutex_init(&epc->mutex); |
aadc4df3 | 252 | c4iw_init_wr_wait(&epc->wr_wait); |
cfdda9d7 SW |
253 | } |
254 | PDBG("%s alloc ep %p\n", __func__, epc); | |
255 | return epc; | |
256 | } | |
257 | ||
258 | void _c4iw_free_ep(struct kref *kref) | |
259 | { | |
260 | struct c4iw_ep *ep; | |
261 | ||
262 | ep = container_of(kref, struct c4iw_ep, com.kref); | |
263 | PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); | |
264 | if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { | |
265 | cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); | |
266 | dst_release(ep->dst); | |
267 | cxgb4_l2t_release(ep->l2t); | |
268 | } | |
269 | kfree(ep); | |
270 | } | |
271 | ||
272 | static void release_ep_resources(struct c4iw_ep *ep) | |
273 | { | |
274 | set_bit(RELEASE_RESOURCES, &ep->com.flags); | |
275 | c4iw_put_ep(&ep->com); | |
276 | } | |
277 | ||
cfdda9d7 SW |
278 | static int status2errno(int status) |
279 | { | |
280 | switch (status) { | |
281 | case CPL_ERR_NONE: | |
282 | return 0; | |
283 | case CPL_ERR_CONN_RESET: | |
284 | return -ECONNRESET; | |
285 | case CPL_ERR_ARP_MISS: | |
286 | return -EHOSTUNREACH; | |
287 | case CPL_ERR_CONN_TIMEDOUT: | |
288 | return -ETIMEDOUT; | |
289 | case CPL_ERR_TCAM_FULL: | |
290 | return -ENOMEM; | |
291 | case CPL_ERR_CONN_EXIST: | |
292 | return -EADDRINUSE; | |
293 | default: | |
294 | return -EIO; | |
295 | } | |
296 | } | |
297 | ||
298 | /* | |
299 | * Try and reuse skbs already allocated... | |
300 | */ | |
301 | static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) | |
302 | { | |
303 | if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { | |
304 | skb_trim(skb, 0); | |
305 | skb_get(skb); | |
306 | skb_reset_transport_header(skb); | |
307 | } else { | |
308 | skb = alloc_skb(len, gfp); | |
309 | } | |
310 | return skb; | |
311 | } | |
312 | ||
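/* Thin wrapper around ip_route_output_ports(); returns NULL if no route. */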
313 | static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip, | |
314 | __be32 peer_ip, __be16 local_port, | |
315 | __be16 peer_port, u8 tos) | |
316 | { | |
317 | struct rtable *rt; | |
31e4543d | 318 | struct flowi4 fl4; |
78fbfd8a | 319 | |
31e4543d | 320 | rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, |
78fbfd8a DM |
321 | peer_port, local_port, IPPROTO_TCP, |
322 | tos, 0); | |
b23dd4fe | 323 | if (IS_ERR(rt)) |
cfdda9d7 SW |
324 | return NULL; |
325 | return rt; | |
326 | } | |
327 | ||
328 | static void arp_failure_discard(void *handle, struct sk_buff *skb) | |
329 | { | |
330 | PDBG("%s c4iw_dev %p\n", __func__, handle); | |
331 | kfree_skb(skb); | |
332 | } | |
333 | ||
334 | /* | |
335 | * Handle an ARP failure for an active open. | |
336 | */ | |
337 | static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) | |
338 | { | |
339 | printk(KERN_ERR MOD "ARP failure during connect\n"); | |
340 | kfree_skb(skb); | |
341 | } | |
342 | ||
343 | /* | |
344 | * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant | |
345 | * and send it along. | |
346 | */ | |
347 | static void abort_arp_failure(void *handle, struct sk_buff *skb) | |
348 | { | |
349 | struct c4iw_rdev *rdev = handle; | |
350 | struct cpl_abort_req *req = cplhdr(skb); | |
351 | ||
352 | PDBG("%s rdev %p\n", __func__, rdev); | |
353 | req->cmd = CPL_ABORT_NO_RST; | |
354 | c4iw_ofld_send(rdev, skb); | |
355 | } | |
356 | ||
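/*
 * Send the FW_FLOWC_WR that programs the per-flow parameters (PCI
 * function, channel, port, ingress queue, send/receive sequence numbers,
 * send buffer and MSS) into the firmware before any offloaded transmit.
 */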
357 | static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) | |
358 | { | |
359 | unsigned int flowclen = 80; | |
360 | struct fw_flowc_wr *flowc; | |
361 | int i; | |
362 | ||
363 | skb = get_skb(skb, flowclen, GFP_KERNEL); | |
364 | flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); | |
365 | ||
366 | flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) | | |
367 | FW_FLOWC_WR_NPARAMS(8)); | |
368 | flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen, | |
369 | 16)) | FW_WR_FLOWID(ep->hwtid)); | |
370 | ||
371 | flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; | |
94788657 | 372 | flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8); |
cfdda9d7 SW |
373 | flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; |
374 | flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); | |
375 | flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; | |
376 | flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); | |
377 | flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; | |
378 | flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); | |
379 | flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; | |
380 | flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); | |
381 | flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; | |
382 | flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); | |
383 | flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; | |
384 | flowc->mnemval[6].val = cpu_to_be32(snd_win); | |
385 | flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; | |
386 | flowc->mnemval[7].val = cpu_to_be32(ep->emss); | |
387 | /* Pad WR to 16 byte boundary */ | |
388 | flowc->mnemval[8].mnemonic = 0; | |
389 | flowc->mnemval[8].val = 0; | |
390 | for (i = 0; i < 9; i++) { | |
391 | flowc->mnemval[i].r4[0] = 0; | |
392 | flowc->mnemval[i].r4[1] = 0; | |
393 | flowc->mnemval[i].r4[2] = 0; | |
394 | } | |
395 | ||
396 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
397 | c4iw_ofld_send(&ep->com.dev->rdev, skb); | |
398 | } | |
399 | ||
400 | static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) | |
401 | { | |
402 | struct cpl_close_con_req *req; | |
403 | struct sk_buff *skb; | |
404 | int wrlen = roundup(sizeof *req, 16); | |
405 | ||
406 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
407 | skb = get_skb(NULL, wrlen, gfp); | |
408 | if (!skb) { | |
409 | printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); | |
410 | return -ENOMEM; | |
411 | } | |
412 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
413 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | |
414 | req = (struct cpl_close_con_req *) skb_put(skb, wrlen); | |
415 | memset(req, 0, wrlen); | |
416 | INIT_TP_WR(req, ep->hwtid); | |
417 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, | |
418 | ep->hwtid)); | |
419 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
420 | } | |
421 | ||
422 | static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) | |
423 | { | |
424 | struct cpl_abort_req *req; | |
425 | int wrlen = roundup(sizeof *req, 16); | |
426 | ||
427 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
428 | skb = get_skb(skb, wrlen, gfp); | |
429 | if (!skb) { | |
430 | printk(KERN_ERR MOD "%s - failed to alloc skb.\n", | |
431 | __func__); | |
432 | return -ENOMEM; | |
433 | } | |
434 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
435 | t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); | |
436 | req = (struct cpl_abort_req *) skb_put(skb, wrlen); | |
437 | memset(req, 0, wrlen); | |
438 | INIT_TP_WR(req, ep->hwtid); | |
439 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); | |
440 | req->cmd = CPL_ABORT_SEND_RST; | |
441 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
442 | } | |
443 | ||
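/*
 * Build and send the CPL_ACT_OPEN_REQ for an active open: the 4-tuple
 * plus the opt0/opt2 connection options (keepalive, window scale, MSS
 * index, L2T index, DSCP, receive buffer size, RSS queue).
 */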
444 | static int send_connect(struct c4iw_ep *ep) | |
445 | { | |
446 | struct cpl_act_open_req *req; | |
447 | struct sk_buff *skb; | |
448 | u64 opt0; | |
449 | u32 opt2; | |
450 | unsigned int mtu_idx; | |
451 | int wscale; | |
452 | int wrlen = roundup(sizeof *req, 16); | |
453 | ||
454 | PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); | |
455 | ||
456 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | |
457 | if (!skb) { | |
458 | printk(KERN_ERR MOD "%s - failed to alloc skb.\n", | |
459 | __func__); | |
460 | return -ENOMEM; | |
461 | } | |
d4f1a5c6 | 462 | set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); |
cfdda9d7 SW |
463 | |
464 | cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); | |
465 | wscale = compute_wscale(rcv_win); | |
466 | opt0 = KEEP_ALIVE(1) | | |
ba6d3925 | 467 | DELACK(1) | |
cfdda9d7 SW |
468 | WND_SCALE(wscale) | |
469 | MSS_IDX(mtu_idx) | | |
470 | L2T_IDX(ep->l2t->idx) | | |
471 | TX_CHAN(ep->tx_chan) | | |
472 | SMAC_SEL(ep->smac_idx) | | |
473 | DSCP(ep->tos) | | |
b48f3b9c | 474 | ULP_MODE(ULP_MODE_TCPDDP) | |
cfdda9d7 SW |
475 | RCV_BUFSIZ(rcv_win>>10); |
476 | opt2 = RX_CHANNEL(0) | | |
477 | RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); | |
478 | if (enable_tcp_timestamps) | |
479 | opt2 |= TSTAMPS_EN(1); | |
480 | if (enable_tcp_sack) | |
481 | opt2 |= SACK_EN(1); | |
482 | if (wscale && enable_tcp_window_scaling) | |
483 | opt2 |= WND_SCALE_EN(1); | |
484 | t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); | |
485 | ||
486 | req = (struct cpl_act_open_req *) skb_put(skb, wrlen); | |
487 | INIT_TP_WR(req, 0); | |
488 | OPCODE_TID(req) = cpu_to_be32( | |
489 | MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid))); | |
490 | req->local_port = ep->com.local_addr.sin_port; | |
491 | req->peer_port = ep->com.remote_addr.sin_port; | |
492 | req->local_ip = ep->com.local_addr.sin_addr.s_addr; | |
493 | req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; | |
494 | req->opt0 = cpu_to_be64(opt0); | |
495 | req->params = 0; | |
496 | req->opt2 = cpu_to_be32(opt2); | |
497 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
498 | } | |
499 | ||
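/*
 * MPA start messages (request, reject and reply below) are sent as
 * FW_OFLD_TX_DATA_WR work requests with the MPA header and any private
 * data carried as immediate data.
 */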
500 | static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb) | |
501 | { | |
502 | int mpalen, wrlen; | |
503 | struct fw_ofld_tx_data_wr *req; | |
504 | struct mpa_message *mpa; | |
505 | ||
506 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | |
507 | ||
508 | BUG_ON(skb_cloned(skb)); | |
509 | ||
510 | mpalen = sizeof(*mpa) + ep->plen; | |
511 | wrlen = roundup(mpalen + sizeof *req, 16); | |
512 | skb = get_skb(skb, wrlen, GFP_KERNEL); | |
513 | if (!skb) { | |
514 | connect_reply_upcall(ep, -ENOMEM); | |
515 | return; | |
516 | } | |
517 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
518 | ||
519 | req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); | |
520 | memset(req, 0, wrlen); | |
521 | req->op_to_immdlen = cpu_to_be32( | |
522 | FW_WR_OP(FW_OFLD_TX_DATA_WR) | | |
523 | FW_WR_COMPL(1) | | |
524 | FW_WR_IMMDLEN(mpalen)); | |
525 | req->flowid_len16 = cpu_to_be32( | |
526 | FW_WR_FLOWID(ep->hwtid) | | |
527 | FW_WR_LEN16(wrlen >> 4)); | |
528 | req->plen = cpu_to_be32(mpalen); | |
529 | req->tunnel_to_proxy = cpu_to_be32( | |
530 | FW_OFLD_TX_DATA_WR_FLUSH(1) | | |
531 | FW_OFLD_TX_DATA_WR_SHOVE(1)); | |
532 | ||
533 | mpa = (struct mpa_message *)(req + 1); | |
534 | memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); | |
535 | mpa->flags = (crc_enabled ? MPA_CRC : 0) | | |
536 | (markers_enabled ? MPA_MARKERS : 0); | |
537 | mpa->private_data_size = htons(ep->plen); | |
538 | mpa->revision = mpa_rev; | |
539 | ||
540 | if (ep->plen) | |
541 | memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen); | |
542 | ||
543 | /* | |
544 | * Reference the mpa skb. This ensures the data area | |
545 | * will remain in memory until the hw acks the tx. | |
546 | * Function fw4_ack() will deref it. | |
547 | */ | |
548 | skb_get(skb); | |
549 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | |
550 | BUG_ON(ep->mpa_skb); | |
551 | ep->mpa_skb = skb; | |
552 | c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
553 | start_ep_timer(ep); | |
554 | state_set(&ep->com, MPA_REQ_SENT); | |
555 | ep->mpa_attr.initiator = 1; | |
556 | return; | |
557 | } | |
558 | ||
559 | static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) | |
560 | { | |
561 | int mpalen, wrlen; | |
562 | struct fw_ofld_tx_data_wr *req; | |
563 | struct mpa_message *mpa; | |
564 | struct sk_buff *skb; | |
565 | ||
566 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | |
567 | ||
568 | mpalen = sizeof(*mpa) + plen; | |
569 | wrlen = roundup(mpalen + sizeof *req, 16); | |
570 | ||
571 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | |
572 | if (!skb) { | |
573 | printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); | |
574 | return -ENOMEM; | |
575 | } | |
576 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
577 | ||
578 | req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); | |
579 | memset(req, 0, wrlen); | |
580 | req->op_to_immdlen = cpu_to_be32( | |
581 | FW_WR_OP(FW_OFLD_TX_DATA_WR) | | |
582 | FW_WR_COMPL(1) | | |
583 | FW_WR_IMMDLEN(mpalen)); | |
584 | req->flowid_len16 = cpu_to_be32( | |
585 | FW_WR_FLOWID(ep->hwtid) | | |
586 | FW_WR_LEN16(wrlen >> 4)); | |
587 | req->plen = cpu_to_be32(mpalen); | |
588 | req->tunnel_to_proxy = cpu_to_be32( | |
589 | FW_OFLD_TX_DATA_WR_FLUSH(1) | | |
590 | FW_OFLD_TX_DATA_WR_SHOVE(1)); | |
591 | ||
592 | mpa = (struct mpa_message *)(req + 1); | |
593 | memset(mpa, 0, sizeof(*mpa)); | |
594 | memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); | |
595 | mpa->flags = MPA_REJECT; | |
596 | mpa->revision = mpa_rev; | |
597 | mpa->private_data_size = htons(plen); | |
598 | if (plen) | |
599 | memcpy(mpa->private_data, pdata, plen); | |
600 | ||
601 | /* | |
602 | * Reference the mpa skb again. This ensures the data area | |
603 | * will remain in memory until the hw acks the tx. | |
604 | * Function fw4_ack() will deref it. | |
605 | */ | |
606 | skb_get(skb); | |
607 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
608 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | |
609 | BUG_ON(ep->mpa_skb); | |
610 | ep->mpa_skb = skb; | |
611 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
612 | } | |
613 | ||
614 | static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) | |
615 | { | |
616 | int mpalen, wrlen; | |
617 | struct fw_ofld_tx_data_wr *req; | |
618 | struct mpa_message *mpa; | |
619 | struct sk_buff *skb; | |
620 | ||
621 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | |
622 | ||
623 | mpalen = sizeof(*mpa) + plen; | |
624 | wrlen = roundup(mpalen + sizeof *req, 16); | |
625 | ||
626 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | |
627 | if (!skb) { | |
628 | printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); | |
629 | return -ENOMEM; | |
630 | } | |
631 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
632 | ||
633 | req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); | |
634 | memset(req, 0, wrlen); | |
635 | req->op_to_immdlen = cpu_to_be32( | |
636 | FW_WR_OP(FW_OFLD_TX_DATA_WR) | | |
637 | FW_WR_COMPL(1) | | |
638 | FW_WR_IMMDLEN(mpalen)); | |
639 | req->flowid_len16 = cpu_to_be32( | |
640 | FW_WR_FLOWID(ep->hwtid) | | |
641 | FW_WR_LEN16(wrlen >> 4)); | |
642 | req->plen = cpu_to_be32(mpalen); | |
643 | req->tunnel_to_proxy = cpu_to_be32( | |
644 | FW_OFLD_TX_DATA_WR_FLUSH(1) | | |
645 | FW_OFLD_TX_DATA_WR_SHOVE(1)); | |
646 | ||
647 | mpa = (struct mpa_message *)(req + 1); | |
648 | memset(mpa, 0, sizeof(*mpa)); | |
649 | memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); | |
650 | mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | | |
651 | (markers_enabled ? MPA_MARKERS : 0); | |
652 | mpa->revision = mpa_rev; | |
653 | mpa->private_data_size = htons(plen); | |
654 | if (plen) | |
655 | memcpy(mpa->private_data, pdata, plen); | |
656 | ||
657 | /* | |
658 | * Reference the mpa skb. This ensures the data area | |
659 | * will remain in memory until the hw acks the tx. | |
660 | * Function fw4_ack() will deref it. | |
661 | */ | |
662 | skb_get(skb); | |
663 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | |
664 | ep->mpa_skb = skb; | |
665 | state_set(&ep->com, MPA_REP_SENT); | |
666 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
667 | } | |
668 | ||
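/*
 * CPL_ACT_ESTABLISH: the active-open TCP connection is up. Record the
 * hardware tid and initial sequence numbers, free the atid, then kick
 * off MPA negotiation by sending the flowc and the MPA request.
 */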
669 | static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) | |
670 | { | |
671 | struct c4iw_ep *ep; | |
672 | struct cpl_act_establish *req = cplhdr(skb); | |
673 | unsigned int tid = GET_TID(req); | |
674 | unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); | |
675 | struct tid_info *t = dev->rdev.lldi.tids; | |
676 | ||
677 | ep = lookup_atid(t, atid); | |
678 | ||
679 | PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, | |
680 | be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); | |
681 | ||
682 | dst_confirm(ep->dst); | |
683 | ||
684 | /* setup the hwtid for this connection */ | |
685 | ep->hwtid = tid; | |
686 | cxgb4_insert_tid(t, ep, tid); | |
687 | ||
688 | ep->snd_seq = be32_to_cpu(req->snd_isn); | |
689 | ep->rcv_seq = be32_to_cpu(req->rcv_isn); | |
690 | ||
691 | set_emss(ep, ntohs(req->tcp_opt)); | |
692 | ||
693 | /* dealloc the atid */ | |
694 | cxgb4_free_atid(t, atid); | |
695 | ||
696 | /* start MPA negotiation */ | |
697 | send_flowc(ep, NULL); | |
698 | send_mpa_req(ep, skb); | |
699 | ||
700 | return 0; | |
701 | } | |
702 | ||
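/*
 * The *_upcall() helpers below build iw_cm_event structures and deliver
 * them to the ULP through the cm_id event handler, dropping the cm_id
 * reference once the connection is finished with it.
 */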
703 | static void close_complete_upcall(struct c4iw_ep *ep) | |
704 | { | |
705 | struct iw_cm_event event; | |
706 | ||
707 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
708 | memset(&event, 0, sizeof(event)); | |
709 | event.event = IW_CM_EVENT_CLOSE; | |
710 | if (ep->com.cm_id) { | |
711 | PDBG("close complete delivered ep %p cm_id %p tid %u\n", | |
712 | ep, ep->com.cm_id, ep->hwtid); | |
713 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
714 | ep->com.cm_id->rem_ref(ep->com.cm_id); | |
715 | ep->com.cm_id = NULL; | |
716 | ep->com.qp = NULL; | |
717 | } | |
718 | } | |
719 | ||
720 | static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) | |
721 | { | |
722 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
723 | close_complete_upcall(ep); | |
724 | state_set(&ep->com, ABORTING); | |
725 | return send_abort(ep, skb, gfp); | |
726 | } | |
727 | ||
728 | static void peer_close_upcall(struct c4iw_ep *ep) | |
729 | { | |
730 | struct iw_cm_event event; | |
731 | ||
732 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
733 | memset(&event, 0, sizeof(event)); | |
734 | event.event = IW_CM_EVENT_DISCONNECT; | |
735 | if (ep->com.cm_id) { | |
736 | PDBG("peer close delivered ep %p cm_id %p tid %u\n", | |
737 | ep, ep->com.cm_id, ep->hwtid); | |
738 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
739 | } | |
740 | } | |
741 | ||
742 | static void peer_abort_upcall(struct c4iw_ep *ep) | |
743 | { | |
744 | struct iw_cm_event event; | |
745 | ||
746 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
747 | memset(&event, 0, sizeof(event)); | |
748 | event.event = IW_CM_EVENT_CLOSE; | |
749 | event.status = -ECONNRESET; | |
750 | if (ep->com.cm_id) { | |
751 | PDBG("abort delivered ep %p cm_id %p tid %u\n", ep, | |
752 | ep->com.cm_id, ep->hwtid); | |
753 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
754 | ep->com.cm_id->rem_ref(ep->com.cm_id); | |
755 | ep->com.cm_id = NULL; | |
756 | ep->com.qp = NULL; | |
757 | } | |
758 | } | |
759 | ||
760 | static void connect_reply_upcall(struct c4iw_ep *ep, int status) | |
761 | { | |
762 | struct iw_cm_event event; | |
763 | ||
764 | PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); | |
765 | memset(&event, 0, sizeof(event)); | |
766 | event.event = IW_CM_EVENT_CONNECT_REPLY; | |
767 | event.status = status; | |
768 | event.local_addr = ep->com.local_addr; | |
769 | event.remote_addr = ep->com.remote_addr; | |
770 | ||
771 | if ((status == 0) || (status == -ECONNREFUSED)) { | |
772 | event.private_data_len = ep->plen; | |
773 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | |
774 | } | |
85963e4c RD |
775 | |
776 | PDBG("%s ep %p tid %u status %d\n", __func__, ep, | |
777 | ep->hwtid, status); | |
778 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
779 | ||
cfdda9d7 SW |
780 | if (status < 0) { |
781 | ep->com.cm_id->rem_ref(ep->com.cm_id); | |
782 | ep->com.cm_id = NULL; | |
783 | ep->com.qp = NULL; | |
784 | } | |
785 | } | |
786 | ||
787 | static void connect_request_upcall(struct c4iw_ep *ep) | |
788 | { | |
789 | struct iw_cm_event event; | |
790 | ||
791 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
792 | memset(&event, 0, sizeof(event)); | |
793 | event.event = IW_CM_EVENT_CONNECT_REQUEST; | |
794 | event.local_addr = ep->com.local_addr; | |
795 | event.remote_addr = ep->com.remote_addr; | |
796 | event.private_data_len = ep->plen; | |
797 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | |
798 | event.provider_data = ep; | |
799 | if (state_read(&ep->parent_ep->com) != DEAD) { | |
800 | c4iw_get_ep(&ep->com); | |
801 | ep->parent_ep->com.cm_id->event_handler( | |
802 | ep->parent_ep->com.cm_id, | |
803 | &event); | |
804 | } | |
805 | c4iw_put_ep(&ep->parent_ep->com); | |
806 | ep->parent_ep = NULL; | |
807 | } | |
808 | ||
809 | static void established_upcall(struct c4iw_ep *ep) | |
810 | { | |
811 | struct iw_cm_event event; | |
812 | ||
813 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
814 | memset(&event, 0, sizeof(event)); | |
815 | event.event = IW_CM_EVENT_ESTABLISHED; | |
816 | if (ep->com.cm_id) { | |
817 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
818 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
819 | } | |
820 | } | |
821 | ||
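/*
 * Return receive-window credits to the hardware with a CPL_RX_DATA_ACK,
 * using the configured delayed-ack mode.
 */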
822 | static int update_rx_credits(struct c4iw_ep *ep, u32 credits) | |
823 | { | |
824 | struct cpl_rx_data_ack *req; | |
825 | struct sk_buff *skb; | |
826 | int wrlen = roundup(sizeof *req, 16); | |
827 | ||
828 | PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); | |
829 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | |
830 | if (!skb) { | |
831 | printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); | |
832 | return 0; | |
833 | } | |
834 | ||
835 | req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); | |
836 | memset(req, 0, wrlen); | |
837 | INIT_TP_WR(req, ep->hwtid); | |
838 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, | |
839 | ep->hwtid)); | |
ba6d3925 SW |
840 | req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) | |
841 | F_RX_DACK_CHANGE | | |
842 | V_RX_DACK_MODE(dack_mode)); | |
d4f1a5c6 | 843 | set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); |
cfdda9d7 SW |
844 | c4iw_ofld_send(&ep->com.dev->rdev, skb); |
845 | return credits; | |
846 | } | |
847 | ||
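/*
 * Active side: accumulate streaming-mode RX data into ep->mpa_pkt until
 * the complete MPA start reply (header plus private data) has arrived,
 * validate it, move the QP to RTS and deliver the connect reply upcall.
 * Any validation failure aborts the connection.
 */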
848 | static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |
849 | { | |
850 | struct mpa_message *mpa; | |
851 | u16 plen; | |
852 | struct c4iw_qp_attributes attrs; | |
853 | enum c4iw_qp_attr_mask mask; | |
854 | int err; | |
855 | ||
856 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
857 | ||
858 | /* | |
859 | * Stop mpa timer. If it expired, then the state has | |
860 | * changed and we bail since ep_timeout already aborted | |
861 | * the connection. | |
862 | */ | |
863 | stop_ep_timer(ep); | |
864 | if (state_read(&ep->com) != MPA_REQ_SENT) | |
865 | return; | |
866 | ||
867 | /* | |
868 | * If we get more than the supported amount of private data | |
869 | * then we must fail this connection. | |
870 | */ | |
871 | if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { | |
872 | err = -EINVAL; | |
873 | goto err; | |
874 | } | |
875 | ||
876 | /* | |
877 | * copy the new data into our accumulation buffer. | |
878 | */ | |
879 | skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), | |
880 | skb->len); | |
881 | ep->mpa_pkt_len += skb->len; | |
882 | ||
883 | /* | |
884 | * if we don't even have the mpa message, then bail. | |
885 | */ | |
886 | if (ep->mpa_pkt_len < sizeof(*mpa)) | |
887 | return; | |
888 | mpa = (struct mpa_message *) ep->mpa_pkt; | |
889 | ||
890 | /* Validate MPA header. */ | |
891 | if (mpa->revision != mpa_rev) { | |
892 | err = -EPROTO; | |
893 | goto err; | |
894 | } | |
895 | if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { | |
896 | err = -EPROTO; | |
897 | goto err; | |
898 | } | |
899 | ||
900 | plen = ntohs(mpa->private_data_size); | |
901 | ||
902 | /* | |
903 | * Fail if there's too much private data. | |
904 | */ | |
905 | if (plen > MPA_MAX_PRIVATE_DATA) { | |
906 | err = -EPROTO; | |
907 | goto err; | |
908 | } | |
909 | ||
910 | /* | |
911 | * If plen does not account for pkt size | |
912 | */ | |
913 | if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { | |
914 | err = -EPROTO; | |
915 | goto err; | |
916 | } | |
917 | ||
918 | ep->plen = (u8) plen; | |
919 | ||
920 | /* | |
921 | * If we don't have all the pdata yet, then bail. | |
922 | * We'll continue processing when more data arrives. | |
923 | */ | |
924 | if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) | |
925 | return; | |
926 | ||
927 | if (mpa->flags & MPA_REJECT) { | |
928 | err = -ECONNREFUSED; | |
929 | goto err; | |
930 | } | |
931 | ||
932 | /* | |
933 | * If we get here we have accumulated the entire mpa | |
934 | * start reply message including private data. And | |
935 | * the MPA header is valid. | |
936 | */ | |
937 | state_set(&ep->com, FPDU_MODE); | |
938 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; | |
939 | ep->mpa_attr.recv_marker_enabled = markers_enabled; | |
940 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; | |
941 | ep->mpa_attr.version = mpa_rev; | |
942 | ep->mpa_attr.p2p_type = peer2peer ? p2p_type : | |
943 | FW_RI_INIT_P2PTYPE_DISABLED; | |
944 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " | |
945 | "xmit_marker_enabled=%d, version=%d\n", __func__, | |
946 | ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, | |
947 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); | |
948 | ||
949 | attrs.mpa_attr = ep->mpa_attr; | |
950 | attrs.max_ird = ep->ird; | |
951 | attrs.max_ord = ep->ord; | |
952 | attrs.llp_stream_handle = ep; | |
953 | attrs.next_state = C4IW_QP_STATE_RTS; | |
954 | ||
955 | mask = C4IW_QP_ATTR_NEXT_STATE | | |
956 | C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | | |
957 | C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; | |
958 | ||
959 | /* bind QP and TID with INIT_WR */ | |
960 | err = c4iw_modify_qp(ep->com.qp->rhp, | |
961 | ep->com.qp, mask, &attrs, 1); | |
962 | if (err) | |
963 | goto err; | |
964 | goto out; | |
965 | err: | |
b21ef16a SW |
966 | state_set(&ep->com, ABORTING); |
967 | send_abort(ep, skb, GFP_KERNEL); | |
cfdda9d7 SW |
968 | out: |
969 | connect_reply_upcall(ep, err); | |
970 | return; | |
971 | } | |
972 | ||
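/*
 * Passive side: accumulate the peer's MPA start request in ep->mpa_pkt,
 * validate it, then deliver the connect request upcall to the listener.
 */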
973 | static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |
974 | { | |
975 | struct mpa_message *mpa; | |
976 | u16 plen; | |
977 | ||
978 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
979 | ||
980 | if (state_read(&ep->com) != MPA_REQ_WAIT) | |
981 | return; | |
982 | ||
983 | /* | |
984 | * If we get more than the supported amount of private data | |
985 | * then we must fail this connection. | |
986 | */ | |
987 | if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { | |
988 | stop_ep_timer(ep); | |
989 | abort_connection(ep, skb, GFP_KERNEL); | |
990 | return; | |
991 | } | |
992 | ||
993 | PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); | |
994 | ||
995 | /* | |
996 | * Copy the new data into our accumulation buffer. | |
997 | */ | |
998 | skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), | |
999 | skb->len); | |
1000 | ep->mpa_pkt_len += skb->len; | |
1001 | ||
1002 | /* | |
1003 | * If we don't even have the mpa message, then bail. | |
1004 | * We'll continue processing when more data arrives. | |
1005 | */ | |
1006 | if (ep->mpa_pkt_len < sizeof(*mpa)) | |
1007 | return; | |
1008 | ||
1009 | PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); | |
1010 | stop_ep_timer(ep); | |
1011 | mpa = (struct mpa_message *) ep->mpa_pkt; | |
1012 | ||
1013 | /* | |
1014 | * Validate MPA Header. | |
1015 | */ | |
1016 | if (mpa->revision != mpa_rev) { | |
1017 | abort_connection(ep, skb, GFP_KERNEL); | |
1018 | return; | |
1019 | } | |
1020 | ||
1021 | if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { | |
1022 | abort_connection(ep, skb, GFP_KERNEL); | |
1023 | return; | |
1024 | } | |
1025 | ||
1026 | plen = ntohs(mpa->private_data_size); | |
1027 | ||
1028 | /* | |
1029 | * Fail if there's too much private data. | |
1030 | */ | |
1031 | if (plen > MPA_MAX_PRIVATE_DATA) { | |
1032 | abort_connection(ep, skb, GFP_KERNEL); | |
1033 | return; | |
1034 | } | |
1035 | ||
1036 | /* | |
1037 | * If plen does not account for pkt size | |
1038 | */ | |
1039 | if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { | |
1040 | abort_connection(ep, skb, GFP_KERNEL); | |
1041 | return; | |
1042 | } | |
1043 | ep->plen = (u8) plen; | |
1044 | ||
1045 | /* | |
1046 | * If we don't have all the pdata yet, then bail. | |
1047 | */ | |
1048 | if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) | |
1049 | return; | |
1050 | ||
1051 | /* | |
1052 | * If we get here we have accumulated the entire mpa | |
1053 | * start request message including private data. | |
1054 | */ | |
1055 | ep->mpa_attr.initiator = 0; | |
1056 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; | |
1057 | ep->mpa_attr.recv_marker_enabled = markers_enabled; | |
1058 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; | |
1059 | ep->mpa_attr.version = mpa_rev; | |
1060 | ep->mpa_attr.p2p_type = peer2peer ? p2p_type : | |
1061 | FW_RI_INIT_P2PTYPE_DISABLED; | |
1062 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " | |
1063 | "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, | |
1064 | ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, | |
1065 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, | |
1066 | ep->mpa_attr.p2p_type); | |
1067 | ||
1068 | state_set(&ep->com, MPA_REQ_RCVD); | |
1069 | ||
1070 | /* drive upcall */ | |
1071 | connect_request_upcall(ep); | |
1072 | return; | |
1073 | } | |
1074 | ||
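/*
 * CPL_RX_DATA: streaming-mode data is only expected during MPA
 * negotiation; dispatch it to the reply/request parsers based on the
 * endpoint state.
 */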
1075 | static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) | |
1076 | { | |
1077 | struct c4iw_ep *ep; | |
1078 | struct cpl_rx_data *hdr = cplhdr(skb); | |
1079 | unsigned int dlen = ntohs(hdr->len); | |
1080 | unsigned int tid = GET_TID(hdr); | |
1081 | struct tid_info *t = dev->rdev.lldi.tids; | |
1082 | ||
1083 | ep = lookup_tid(t, tid); | |
1084 | PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); | |
1085 | skb_pull(skb, sizeof(*hdr)); | |
1086 | skb_trim(skb, dlen); | |
1087 | ||
1088 | ep->rcv_seq += dlen; | |
1089 | BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen)); | |
1090 | ||
1091 | /* update RX credits */ | |
1092 | update_rx_credits(ep, dlen); | |
1093 | ||
1094 | switch (state_read(&ep->com)) { | |
1095 | case MPA_REQ_SENT: | |
1096 | process_mpa_reply(ep, skb); | |
1097 | break; | |
1098 | case MPA_REQ_WAIT: | |
1099 | process_mpa_request(ep, skb); | |
1100 | break; | |
1101 | case MPA_REP_SENT: | |
1102 | break; | |
1103 | default: | |
1104 | printk(KERN_ERR MOD "%s Unexpected streaming data." | |
1105 | " ep %p state %d tid %u\n", | |
1106 | __func__, ep, state_read(&ep->com), ep->hwtid); | |
1107 | ||
1108 | /* | |
1109 | * The ep will time out and inform the ULP of the failure. | |
1110 | * See ep_timeout(). | |
1111 | */ | |
1112 | break; | |
1113 | } | |
1114 | return 0; | |
1115 | } | |
1116 | ||
1117 | static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
1118 | { | |
1119 | struct c4iw_ep *ep; | |
1120 | struct cpl_abort_rpl_rss *rpl = cplhdr(skb); | |
cfdda9d7 SW |
1121 | int release = 0; |
1122 | unsigned int tid = GET_TID(rpl); | |
1123 | struct tid_info *t = dev->rdev.lldi.tids; | |
1124 | ||
1125 | ep = lookup_tid(t, tid); | |
1126 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1127 | BUG_ON(!ep); | |
2f5b48c3 | 1128 | mutex_lock(&ep->com.mutex); |
cfdda9d7 SW |
1129 | switch (ep->com.state) { |
1130 | case ABORTING: | |
1131 | __state_set(&ep->com, DEAD); | |
1132 | release = 1; | |
1133 | break; | |
1134 | default: | |
1135 | printk(KERN_ERR "%s ep %p state %d\n", | |
1136 | __func__, ep, ep->com.state); | |
1137 | break; | |
1138 | } | |
2f5b48c3 | 1139 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
1140 | |
1141 | if (release) | |
1142 | release_ep_resources(ep); | |
1143 | return 0; | |
1144 | } | |
1145 | ||
1146 | /* | |
1147 | * Return whether a failed active open has allocated a TID | |
1148 | */ | |
1149 | static inline int act_open_has_tid(int status) | |
1150 | { | |
1151 | return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && | |
1152 | status != CPL_ERR_ARP_MISS; | |
1153 | } | |
1154 | ||
1155 | static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
1156 | { | |
1157 | struct c4iw_ep *ep; | |
1158 | struct cpl_act_open_rpl *rpl = cplhdr(skb); | |
1159 | unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( | |
1160 | ntohl(rpl->atid_status))); | |
1161 | struct tid_info *t = dev->rdev.lldi.tids; | |
1162 | int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); | |
1163 | ||
1164 | ep = lookup_atid(t, atid); | |
1165 | ||
1166 | PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, | |
1167 | status, status2errno(status)); | |
1168 | ||
1169 | if (status == CPL_ERR_RTX_NEG_ADVICE) { | |
1170 | printk(KERN_WARNING MOD "Connection problems for atid %u\n", | |
1171 | atid); | |
1172 | return 0; | |
1173 | } | |
1174 | ||
1175 | connect_reply_upcall(ep, status2errno(status)); | |
1176 | state_set(&ep->com, DEAD); | |
1177 | ||
1178 | if (status && act_open_has_tid(status)) | |
1179 | cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); | |
1180 | ||
1181 | cxgb4_free_atid(t, atid); | |
1182 | dst_release(ep->dst); | |
1183 | cxgb4_l2t_release(ep->l2t); | |
1184 | c4iw_put_ep(&ep->com); | |
1185 | ||
1186 | return 0; | |
1187 | } | |
1188 | ||
1189 | static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
1190 | { | |
1191 | struct cpl_pass_open_rpl *rpl = cplhdr(skb); | |
1192 | struct tid_info *t = dev->rdev.lldi.tids; | |
1193 | unsigned int stid = GET_TID(rpl); | |
1194 | struct c4iw_listen_ep *ep = lookup_stid(t, stid); | |
1195 | ||
1196 | if (!ep) { | |
1197 | printk(KERN_ERR MOD "stid %d lookup failure!\n", stid); | |
1198 | return 0; | |
1199 | } | |
1200 | PDBG("%s ep %p status %d error %d\n", __func__, ep, | |
1201 | rpl->status, status2errno(rpl->status)); | |
d9594d99 | 1202 | c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); |
cfdda9d7 SW |
1203 | |
1204 | return 0; | |
1205 | } | |
1206 | ||
1207 | static int listen_stop(struct c4iw_listen_ep *ep) | |
1208 | { | |
1209 | struct sk_buff *skb; | |
1210 | struct cpl_close_listsvr_req *req; | |
1211 | ||
1212 | PDBG("%s ep %p\n", __func__, ep); | |
1213 | skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); | |
1214 | if (!skb) { | |
1215 | printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); | |
1216 | return -ENOMEM; | |
1217 | } | |
1218 | req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req)); | |
1219 | INIT_TP_WR(req, 0); | |
1220 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, | |
1221 | ep->stid)); | |
1222 | req->reply_ctrl = cpu_to_be16( | |
1223 | QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0])); | |
1224 | set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); | |
1225 | return c4iw_ofld_send(&ep->com.dev->rdev, skb); | |
1226 | } | |
1227 | ||
1228 | static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
1229 | { | |
1230 | struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); | |
1231 | struct tid_info *t = dev->rdev.lldi.tids; | |
1232 | unsigned int stid = GET_TID(rpl); | |
1233 | struct c4iw_listen_ep *ep = lookup_stid(t, stid); | |
1234 | ||
1235 | PDBG("%s ep %p\n", __func__, ep); | |
d9594d99 | 1236 | c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); |
cfdda9d7 SW |
1237 | return 0; |
1238 | } | |
1239 | ||
1240 | static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb, | |
1241 | struct cpl_pass_accept_req *req) | |
1242 | { | |
1243 | struct cpl_pass_accept_rpl *rpl; | |
1244 | unsigned int mtu_idx; | |
1245 | u64 opt0; | |
1246 | u32 opt2; | |
1247 | int wscale; | |
1248 | ||
1249 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1250 | BUG_ON(skb_cloned(skb)); | |
1251 | skb_trim(skb, sizeof(*rpl)); | |
1252 | skb_get(skb); | |
1253 | cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); | |
1254 | wscale = compute_wscale(rcv_win); | |
1255 | opt0 = KEEP_ALIVE(1) | | |
ba6d3925 | 1256 | DELACK(1) | |
cfdda9d7 SW |
1257 | WND_SCALE(wscale) | |
1258 | MSS_IDX(mtu_idx) | | |
1259 | L2T_IDX(ep->l2t->idx) | | |
1260 | TX_CHAN(ep->tx_chan) | | |
1261 | SMAC_SEL(ep->smac_idx) | | |
1262 | DSCP(ep->tos) | | |
b48f3b9c | 1263 | ULP_MODE(ULP_MODE_TCPDDP) | |
cfdda9d7 SW |
1264 | RCV_BUFSIZ(rcv_win>>10); |
1265 | opt2 = RX_CHANNEL(0) | | |
1266 | RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); | |
1267 | ||
1268 | if (enable_tcp_timestamps && req->tcpopt.tstamp) | |
1269 | opt2 |= TSTAMPS_EN(1); | |
1270 | if (enable_tcp_sack && req->tcpopt.sack) | |
1271 | opt2 |= SACK_EN(1); | |
1272 | if (wscale && enable_tcp_window_scaling) | |
1273 | opt2 |= WND_SCALE_EN(1); | |
1274 | ||
1275 | rpl = cplhdr(skb); | |
1276 | INIT_TP_WR(rpl, ep->hwtid); | |
1277 | OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, | |
1278 | ep->hwtid)); | |
1279 | rpl->opt0 = cpu_to_be64(opt0); | |
1280 | rpl->opt2 = cpu_to_be32(opt2); | |
d4f1a5c6 | 1281 | set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); |
cfdda9d7 SW |
1282 | c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); |
1283 | ||
1284 | return; | |
1285 | } | |
1286 | ||
1287 | static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip, | |
1288 | struct sk_buff *skb) | |
1289 | { | |
1290 | PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid, | |
1291 | peer_ip); | |
1292 | BUG_ON(skb_cloned(skb)); | |
1293 | skb_trim(skb, sizeof(struct cpl_tid_release)); | |
1294 | skb_get(skb); | |
1295 | release_tid(&dev->rdev, hwtid, skb); | |
1296 | return; | |
1297 | } | |
1298 | ||
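/*
 * Pull the connection 4-tuple (in network byte order) out of the
 * Ethernet, IP and TCP headers that follow the CPL_PASS_ACCEPT_REQ.
 */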
1299 | static void get_4tuple(struct cpl_pass_accept_req *req, | |
1300 | __be32 *local_ip, __be32 *peer_ip, | |
1301 | __be16 *local_port, __be16 *peer_port) | |
1302 | { | |
1303 | int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); | |
1304 | int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); | |
1305 | struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); | |
1306 | struct tcphdr *tcp = (struct tcphdr *) | |
1307 | ((u8 *)(req + 1) + eth_len + ip_len); | |
1308 | ||
1309 | PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, | |
1310 | ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), | |
1311 | ntohs(tcp->dest)); | |
1312 | ||
1313 | *peer_ip = ip->saddr; | |
1314 | *local_ip = ip->daddr; | |
1315 | *peer_port = tcp->source; | |
1316 | *local_port = tcp->dest; | |
1317 | ||
1318 | return; | |
1319 | } | |
1320 | ||
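/*
 * CPL_PASS_ACCEPT_REQ: an incoming SYN matched a listening endpoint.
 * Resolve the route and L2T entry, allocate and initialize a child
 * endpoint, insert its tid and send the accept reply; on any failure
 * the hardware tid is released via reject_cr().
 */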
1321 | static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) | |
1322 | { | |
1323 | struct c4iw_ep *child_ep, *parent_ep; | |
1324 | struct cpl_pass_accept_req *req = cplhdr(skb); | |
1325 | unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); | |
1326 | struct tid_info *t = dev->rdev.lldi.tids; | |
1327 | unsigned int hwtid = GET_TID(req); | |
69cce1d1 | 1328 | struct neighbour *neigh; |
cfdda9d7 SW |
1329 | struct dst_entry *dst; |
1330 | struct l2t_entry *l2t; | |
1331 | struct rtable *rt; | |
1332 | __be32 local_ip, peer_ip; | |
1333 | __be16 local_port, peer_port; | |
1334 | struct net_device *pdev; | |
1335 | u32 tx_chan, smac_idx; | |
1336 | u16 rss_qid; | |
1337 | u32 mtu; | |
1338 | int step; | |
d4f1a5c6 | 1339 | int txq_idx, ctrlq_idx; |
cfdda9d7 SW |
1340 | |
1341 | parent_ep = lookup_stid(t, stid); | |
1342 | PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid); | |
1343 | ||
1344 | get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port); | |
1345 | ||
1346 | if (state_read(&parent_ep->com) != LISTEN) { | |
1347 | printk(KERN_ERR "%s - listening ep not in LISTEN\n", | |
1348 | __func__); | |
1349 | goto reject; | |
1350 | } | |
1351 | ||
1352 | /* Find output route */ | |
1353 | rt = find_route(dev, local_ip, peer_ip, local_port, peer_port, | |
1354 | GET_POPEN_TOS(ntohl(req->tos_stid))); | |
1355 | if (!rt) { | |
1356 | printk(KERN_ERR MOD "%s - failed to find dst entry!\n", | |
1357 | __func__); | |
1358 | goto reject; | |
1359 | } | |
d8d1f30b | 1360 | dst = &rt->dst; |
69cce1d1 DM |
1361 | neigh = dst_get_neighbour(dst); |
1362 | if (neigh->dev->flags & IFF_LOOPBACK) { | |
cfdda9d7 SW |
1363 | pdev = ip_dev_find(&init_net, peer_ip); |
1364 | BUG_ON(!pdev); | |
69cce1d1 | 1365 | l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0); |
cfdda9d7 SW |
1366 | mtu = pdev->mtu; |
1367 | tx_chan = cxgb4_port_chan(pdev); | |
2c5934bf | 1368 | smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; |
cfdda9d7 SW |
1369 | step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; |
1370 | txq_idx = cxgb4_port_idx(pdev) * step; | |
d4f1a5c6 | 1371 | ctrlq_idx = cxgb4_port_idx(pdev); |
cfdda9d7 SW |
1372 | step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; |
1373 | rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step]; | |
1374 | dev_put(pdev); | |
1375 | } else { | |
69cce1d1 | 1376 | l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0); |
cfdda9d7 | 1377 | mtu = dst_mtu(dst); |
69cce1d1 DM |
1378 | tx_chan = cxgb4_port_chan(neigh->dev); |
1379 | smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1; | |
cfdda9d7 | 1380 | step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; |
69cce1d1 DM |
1381 | txq_idx = cxgb4_port_idx(neigh->dev) * step; |
1382 | ctrlq_idx = cxgb4_port_idx(neigh->dev); | |
cfdda9d7 SW |
1383 | step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; |
1384 | rss_qid = dev->rdev.lldi.rxq_ids[ | |
69cce1d1 | 1385 | cxgb4_port_idx(neigh->dev) * step]; |
cfdda9d7 SW |
1386 | } |
1387 | if (!l2t) { | |
1388 | printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", | |
1389 | __func__); | |
1390 | dst_release(dst); | |
1391 | goto reject; | |
1392 | } | |
1393 | ||
1394 | child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); | |
1395 | if (!child_ep) { | |
1396 | printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", | |
1397 | __func__); | |
1398 | cxgb4_l2t_release(l2t); | |
1399 | dst_release(dst); | |
1400 | goto reject; | |
1401 | } | |
1402 | state_set(&child_ep->com, CONNECTING); | |
1403 | child_ep->com.dev = dev; | |
1404 | child_ep->com.cm_id = NULL; | |
1405 | child_ep->com.local_addr.sin_family = PF_INET; | |
1406 | child_ep->com.local_addr.sin_port = local_port; | |
1407 | child_ep->com.local_addr.sin_addr.s_addr = local_ip; | |
1408 | child_ep->com.remote_addr.sin_family = PF_INET; | |
1409 | child_ep->com.remote_addr.sin_port = peer_port; | |
1410 | child_ep->com.remote_addr.sin_addr.s_addr = peer_ip; | |
1411 | c4iw_get_ep(&parent_ep->com); | |
1412 | child_ep->parent_ep = parent_ep; | |
1413 | child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); | |
1414 | child_ep->l2t = l2t; | |
1415 | child_ep->dst = dst; | |
1416 | child_ep->hwtid = hwtid; | |
1417 | child_ep->tx_chan = tx_chan; | |
1418 | child_ep->smac_idx = smac_idx; | |
1419 | child_ep->rss_qid = rss_qid; | |
1420 | child_ep->mtu = mtu; | |
1421 | child_ep->txq_idx = txq_idx; | |
d4f1a5c6 | 1422 | child_ep->ctrlq_idx = ctrlq_idx; |
cfdda9d7 SW |
1423 | |
1424 | PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, | |
1425 | tx_chan, smac_idx, rss_qid); | |
1426 | ||
1427 | init_timer(&child_ep->timer); | |
1428 | cxgb4_insert_tid(t, child_ep, hwtid); | |
1429 | accept_cr(child_ep, peer_ip, skb, req); | |
1430 | goto out; | |
1431 | reject: | |
1432 | reject_cr(dev, hwtid, peer_ip, skb); | |
1433 | out: | |
1434 | return 0; | |
1435 | } | |
1436 | ||
1437 | static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) | |
1438 | { | |
1439 | struct c4iw_ep *ep; | |
1440 | struct cpl_pass_establish *req = cplhdr(skb); | |
1441 | struct tid_info *t = dev->rdev.lldi.tids; | |
1442 | unsigned int tid = GET_TID(req); | |
1443 | ||
1444 | ep = lookup_tid(t, tid); | |
1445 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1446 | ep->snd_seq = be32_to_cpu(req->snd_isn); | |
1447 | ep->rcv_seq = be32_to_cpu(req->rcv_isn); | |
1448 | ||
1449 | set_emss(ep, ntohs(req->tcp_opt)); | |
1450 | ||
1451 | dst_confirm(ep->dst); | |
1452 | state_set(&ep->com, MPA_REQ_WAIT); | |
1453 | start_ep_timer(ep); | |
1454 | send_flowc(ep, skb); | |
1455 | ||
1456 | return 0; | |
1457 | } | |
1458 | ||
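/*
 * CPL_PEER_CLOSE: the peer has sent a FIN. The action depends on where
 * the endpoint is in its life cycle; the switch below decides whether
 * to deliver an upcall, start a local close, or release the endpoint.
 */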
1459 | static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |
1460 | { | |
1461 | struct cpl_peer_close *hdr = cplhdr(skb); | |
1462 | struct c4iw_ep *ep; | |
1463 | struct c4iw_qp_attributes attrs; | |
cfdda9d7 SW |
1464 | int disconnect = 1; |
1465 | int release = 0; | |
cfdda9d7 SW |
1466 | struct tid_info *t = dev->rdev.lldi.tids; |
1467 | unsigned int tid = GET_TID(hdr); | |
8da7e7a5 | 1468 | int ret; |
cfdda9d7 SW |
1469 | |
1470 | ep = lookup_tid(t, tid); | |
1471 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1472 | dst_confirm(ep->dst); | |
1473 | ||
2f5b48c3 | 1474 | mutex_lock(&ep->com.mutex); |
cfdda9d7 SW |
1475 | switch (ep->com.state) { |
1476 | case MPA_REQ_WAIT: | |
1477 | __state_set(&ep->com, CLOSING); | |
1478 | break; | |
1479 | case MPA_REQ_SENT: | |
1480 | __state_set(&ep->com, CLOSING); | |
1481 | connect_reply_upcall(ep, -ECONNRESET); | |
1482 | break; | |
1483 | case MPA_REQ_RCVD: | |
1484 | ||
1485 | /* | |
1486 | * We're gonna mark this puppy DEAD, but keep | |
1487 | * the reference on it until the ULP accepts or | |
1488 | * rejects the CR. Also wake up anyone waiting | |
1489 | * in rdma connection migration (see c4iw_accept_cr()). | |
1490 | */ | |
1491 | __state_set(&ep->com, CLOSING); | |
cfdda9d7 | 1492 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); |
d9594d99 | 1493 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
cfdda9d7 SW |
1494 | break; |
1495 | case MPA_REP_SENT: | |
1496 | __state_set(&ep->com, CLOSING); | |
cfdda9d7 | 1497 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); |
d9594d99 | 1498 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
cfdda9d7 SW |
1499 | break; |
1500 | case FPDU_MODE: | |
ca5a2202 | 1501 | start_ep_timer(ep); |
cfdda9d7 | 1502 | __state_set(&ep->com, CLOSING); |
30c95c2d | 1503 | attrs.next_state = C4IW_QP_STATE_CLOSING; |
8da7e7a5 | 1504 | ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
30c95c2d | 1505 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
8da7e7a5 SW |
1506 | if (ret != -ECONNRESET) { |
1507 | peer_close_upcall(ep); | |
1508 | disconnect = 1; | |
1509 | } | |
cfdda9d7 SW |
1510 | break; |
1511 | case ABORTING: | |
1512 | disconnect = 0; | |
1513 | break; | |
1514 | case CLOSING: | |
1515 | __state_set(&ep->com, MORIBUND); | |
1516 | disconnect = 0; | |
1517 | break; | |
1518 | case MORIBUND: | |
ca5a2202 | 1519 | stop_ep_timer(ep); |
cfdda9d7 SW |
1520 | if (ep->com.cm_id && ep->com.qp) { |
1521 | attrs.next_state = C4IW_QP_STATE_IDLE; | |
1522 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | |
1523 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | |
1524 | } | |
1525 | close_complete_upcall(ep); | |
1526 | __state_set(&ep->com, DEAD); | |
1527 | release = 1; | |
1528 | disconnect = 0; | |
1529 | break; | |
1530 | case DEAD: | |
1531 | disconnect = 0; | |
1532 | break; | |
1533 | default: | |
1534 | BUG_ON(1); | |
1535 | } | |
2f5b48c3 | 1536 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
1537 | if (disconnect) |
1538 | c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | |
1539 | if (release) | |
1540 | release_ep_resources(ep); | |
1541 | return 0; | |
1542 | } | |
1543 | ||
1544 | /* | |
1545 | * Returns whether an ABORT_REQ_RSS message is negative advice. | |
1546 | */ | |
1547 | static int is_neg_adv_abort(unsigned int status) | |
1548 | { | |
1549 | return status == CPL_ERR_RTX_NEG_ADVICE || | |
1550 | status == CPL_ERR_PERSIST_NEG_ADVICE; | |
1551 | } | |
1552 | ||
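/*
 * Handle CPL_ABORT_REQ_RSS: the connection has been reset.  Negative-advice
 * aborts are ignored.  Otherwise wake any threads blocked in rdma_init()
 * or rdma_fini(), move the QP to ERROR if one is bound, notify the ULP,
 * reply with an ABORT_RPL (no RST) and release the endpoint.
 */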
1553 | static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |
1554 | { | |
1555 | struct cpl_abort_req_rss *req = cplhdr(skb); | |
1556 | struct c4iw_ep *ep; | |
1557 | struct cpl_abort_rpl *rpl; | |
1558 | struct sk_buff *rpl_skb; | |
1559 | struct c4iw_qp_attributes attrs; | |
1560 | int ret; | |
1561 | int release = 0; | |
cfdda9d7 SW |
1562 | struct tid_info *t = dev->rdev.lldi.tids; |
1563 | unsigned int tid = GET_TID(req); | |
cfdda9d7 SW |
1564 | |
1565 | ep = lookup_tid(t, tid); | |
1566 | if (is_neg_adv_abort(req->status)) { | |
1567 | PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, | |
1568 | ep->hwtid); | |
1569 | return 0; | |
1570 | } | |
cfdda9d7 SW |
1571 | PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, |
1572 | ep->com.state); | |
2f5b48c3 SW |
1573 | |
1574 | /* | |
1575 | * Wake up any threads in rdma_init() or rdma_fini(). | |
1576 | */ | |
d9594d99 | 1577 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
2f5b48c3 SW |
1578 | |
1579 | mutex_lock(&ep->com.mutex); | |
cfdda9d7 SW |
1580 | switch (ep->com.state) { |
1581 | case CONNECTING: | |
1582 | break; | |
1583 | case MPA_REQ_WAIT: | |
ca5a2202 | 1584 | stop_ep_timer(ep); |
cfdda9d7 SW |
1585 | break; |
1586 | case MPA_REQ_SENT: | |
ca5a2202 | 1587 | stop_ep_timer(ep); |
cfdda9d7 SW |
1588 | connect_reply_upcall(ep, -ECONNRESET); |
1589 | break; | |
1590 | case MPA_REP_SENT: | |
cfdda9d7 SW |
1591 | break; |
1592 | case MPA_REQ_RCVD: | |
cfdda9d7 SW |
1593 | break; |
1594 | case MORIBUND: | |
1595 | case CLOSING: | |
ca5a2202 | 1596 | stop_ep_timer(ep); |
cfdda9d7 SW |
1597 | /*FALLTHROUGH*/ |
1598 | case FPDU_MODE: | |
1599 | if (ep->com.cm_id && ep->com.qp) { | |
1600 | attrs.next_state = C4IW_QP_STATE_ERROR; | |
1601 | ret = c4iw_modify_qp(ep->com.qp->rhp, | |
1602 | ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, | |
1603 | &attrs, 1); | |
1604 | if (ret) | |
1605 | printk(KERN_ERR MOD | |
1606 | "%s - qp <- error failed!\n", | |
1607 | __func__); | |
1608 | } | |
1609 | peer_abort_upcall(ep); | |
1610 | break; | |
1611 | case ABORTING: | |
1612 | break; | |
1613 | case DEAD: | |
1614 | PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); | |
2f5b48c3 | 1615 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
1616 | return 0; |
1617 | default: | |
1618 | BUG_ON(1); | |
1619 | break; | |
1620 | } | |
1621 | dst_confirm(ep->dst); | |
1622 | if (ep->com.state != ABORTING) { | |
1623 | __state_set(&ep->com, DEAD); | |
1624 | release = 1; | |
1625 | } | |
2f5b48c3 | 1626 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
1627 | |
1628 | rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); | |
1629 | if (!rpl_skb) { | |
1630 | printk(KERN_ERR MOD "%s - cannot allocate skb!\n", | |
1631 | __func__); | |
1632 | release = 1; | |
1633 | goto out; | |
1634 | } | |
1635 | set_wr_txq(rpl_skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
1636 | rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); | |
1637 | INIT_TP_WR(rpl, ep->hwtid); | |
1638 | OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); | |
1639 | rpl->cmd = CPL_ABORT_NO_RST; | |
1640 | c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); | |
1641 | out: | |
cfdda9d7 SW |
1642 | if (release) |
1643 | release_ep_resources(ep); | |
1644 | return 0; | |
1645 | } | |
1646 | ||
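/*
 * Handle CPL_CLOSE_CON_RPL, the reply to our half-close: CLOSING moves
 * to MORIBUND; MORIBUND moves the QP to IDLE, completes the close
 * upcall and releases the endpoint.
 */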
1647 | static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
1648 | { | |
1649 | struct c4iw_ep *ep; | |
1650 | struct c4iw_qp_attributes attrs; | |
1651 | struct cpl_close_con_rpl *rpl = cplhdr(skb); | |
cfdda9d7 SW |
1652 | int release = 0; |
1653 | struct tid_info *t = dev->rdev.lldi.tids; | |
1654 | unsigned int tid = GET_TID(rpl); | |
cfdda9d7 SW |
1655 | |
1656 | ep = lookup_tid(t, tid); | |
1657 | ||
1658 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1659 | BUG_ON(!ep); | |
1660 | ||
1661 | /* The cm_id may be null if we failed to connect */ | |
2f5b48c3 | 1662 | mutex_lock(&ep->com.mutex); |
cfdda9d7 SW |
1663 | switch (ep->com.state) { |
1664 | case CLOSING: | |
1665 | __state_set(&ep->com, MORIBUND); | |
1666 | break; | |
1667 | case MORIBUND: | |
ca5a2202 | 1668 | stop_ep_timer(ep); |
cfdda9d7 SW |
1669 | if ((ep->com.cm_id) && (ep->com.qp)) { |
1670 | attrs.next_state = C4IW_QP_STATE_IDLE; | |
1671 | c4iw_modify_qp(ep->com.qp->rhp, | |
1672 | ep->com.qp, | |
1673 | C4IW_QP_ATTR_NEXT_STATE, | |
1674 | &attrs, 1); | |
1675 | } | |
1676 | close_complete_upcall(ep); | |
1677 | __state_set(&ep->com, DEAD); | |
1678 | release = 1; | |
1679 | break; | |
1680 | case ABORTING: | |
1681 | case DEAD: | |
1682 | break; | |
1683 | default: | |
1684 | BUG_ON(1); | |
1685 | break; | |
1686 | } | |
2f5b48c3 | 1687 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
1688 | if (release) |
1689 | release_ep_resources(ep); | |
1690 | return 0; | |
1691 | } | |
1692 | ||
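/*
 * Handle CPL_RDMA_TERMINATE: move the associated QP into the TERMINATE
 * state, or warn if no endpoint/QP exists for this tid.
 */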
1693 | static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) | |
1694 | { | |
0e42c1f4 | 1695 | struct cpl_rdma_terminate *rpl = cplhdr(skb); |
cfdda9d7 | 1696 | struct tid_info *t = dev->rdev.lldi.tids; |
0e42c1f4 SW |
1697 | unsigned int tid = GET_TID(rpl); |
1698 | struct c4iw_ep *ep; | |
1699 | struct c4iw_qp_attributes attrs; | |
cfdda9d7 SW |
1700 | |
1701 | ep = lookup_tid(t, tid); | |
0e42c1f4 | 1702 | BUG_ON(!ep); |
cfdda9d7 | 1703 | |
30c95c2d | 1704 | if (ep && ep->com.qp) { |
0e42c1f4 SW |
1705 | printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, |
1706 | ep->com.qp->wq.sq.qid); | |
1707 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | |
1708 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | |
1709 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | |
1710 | } else | |
30c95c2d | 1711 | printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); |
cfdda9d7 | 1712 | |
cfdda9d7 SW |
1713 | return 0; |
1714 | } | |
1715 | ||
1716 | /* | |
1717 | * Upcall from the adapter indicating data has been transmitted. | |
1718 | * For us it's just the single MPA request or reply. We can now free | |
1719 | * the skb holding the MPA message. | |
1720 | */ | |
1721 | static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) | |
1722 | { | |
1723 | struct c4iw_ep *ep; | |
1724 | struct cpl_fw4_ack *hdr = cplhdr(skb); | |
1725 | u8 credits = hdr->credits; | |
1726 | unsigned int tid = GET_TID(hdr); | |
1727 | struct tid_info *t = dev->rdev.lldi.tids; | |
1728 | ||
1729 | ||
1730 | ep = lookup_tid(t, tid); | |
1731 | PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); | |
1732 | if (credits == 0) { | |
aa1ad260 JP |
1733 | PDBG("%s 0 credit ack ep %p tid %u state %u\n", |
1734 | __func__, ep, ep->hwtid, state_read(&ep->com)); | |
cfdda9d7 SW |
1735 | return 0; |
1736 | } | |
1737 | ||
1738 | dst_confirm(ep->dst); | |
1739 | if (ep->mpa_skb) { | |
1740 | PDBG("%s last streaming msg ack ep %p tid %u state %u " | |
1741 | "initiator %u freeing skb\n", __func__, ep, ep->hwtid, | |
1742 | state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); | |
1743 | kfree_skb(ep->mpa_skb); | |
1744 | ep->mpa_skb = NULL; | |
1745 | } | |
1746 | return 0; | |
1747 | } | |
1748 | ||
cfdda9d7 SW |
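/*
 * iw_cm reject handler: refuse an MPA connection request, either by
 * aborting the connection (mpa_rev 0) or by sending an MPA reject
 * followed by an orderly disconnect.
 */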
1749 | int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) |
1750 | { | |
1751 | int err; | |
1752 | struct c4iw_ep *ep = to_ep(cm_id); | |
1753 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1754 | ||
1755 | if (state_read(&ep->com) == DEAD) { | |
1756 | c4iw_put_ep(&ep->com); | |
1757 | return -ECONNRESET; | |
1758 | } | |
1759 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); | |
1760 | if (mpa_rev == 0) | |
1761 | abort_connection(ep, NULL, GFP_KERNEL); | |
1762 | else { | |
1763 | err = send_mpa_reject(ep, pdata, pdata_len); | |
1764 | err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | |
1765 | } | |
1766 | c4iw_put_ep(&ep->com); | |
1767 | return 0; | |
1768 | } | |
1769 | ||
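/*
 * iw_cm accept handler: validate the requested IRD/ORD, bind the QP to
 * the endpoint, move the QP to RTS and send the MPA reply.
 */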
1770 | int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |
1771 | { | |
1772 | int err; | |
1773 | struct c4iw_qp_attributes attrs; | |
1774 | enum c4iw_qp_attr_mask mask; | |
1775 | struct c4iw_ep *ep = to_ep(cm_id); | |
1776 | struct c4iw_dev *h = to_c4iw_dev(cm_id->device); | |
1777 | struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); | |
1778 | ||
1779 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1780 | if (state_read(&ep->com) == DEAD) { | |
1781 | err = -ECONNRESET; | |
1782 | goto err; | |
1783 | } | |
1784 | ||
1785 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); | |
1786 | BUG_ON(!qp); | |
1787 | ||
be4c9bad RD |
1788 | if ((conn_param->ord > c4iw_max_read_depth) || |
1789 | (conn_param->ird > c4iw_max_read_depth)) { | |
cfdda9d7 SW |
1790 | abort_connection(ep, NULL, GFP_KERNEL); |
1791 | err = -EINVAL; | |
1792 | goto err; | |
1793 | } | |
1794 | ||
1795 | cm_id->add_ref(cm_id); | |
1796 | ep->com.cm_id = cm_id; | |
1797 | ep->com.qp = qp; | |
1798 | ||
1799 | ep->ird = conn_param->ird; | |
1800 | ep->ord = conn_param->ord; | |
1801 | ||
1802 | if (peer2peer && ep->ird == 0) | |
1803 | ep->ird = 1; | |
1804 | ||
1805 | PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); | |
1806 | ||
1807 | /* bind QP to EP and move to RTS */ | |
1808 | attrs.mpa_attr = ep->mpa_attr; | |
1809 | attrs.max_ird = ep->ird; | |
1810 | attrs.max_ord = ep->ord; | |
1811 | attrs.llp_stream_handle = ep; | |
1812 | attrs.next_state = C4IW_QP_STATE_RTS; | |
1813 | ||
1814 | /* bind QP and TID with INIT_WR */ | |
1815 | mask = C4IW_QP_ATTR_NEXT_STATE | | |
1816 | C4IW_QP_ATTR_LLP_STREAM_HANDLE | | |
1817 | C4IW_QP_ATTR_MPA_ATTR | | |
1818 | C4IW_QP_ATTR_MAX_IRD | | |
1819 | C4IW_QP_ATTR_MAX_ORD; | |
1820 | ||
1821 | err = c4iw_modify_qp(ep->com.qp->rhp, | |
1822 | ep->com.qp, mask, &attrs, 1); | |
1823 | if (err) | |
1824 | goto err1; | |
1825 | err = send_mpa_reply(ep, conn_param->private_data, | |
1826 | conn_param->private_data_len); | |
1827 | if (err) | |
1828 | goto err1; | |
1829 | ||
1830 | state_set(&ep->com, FPDU_MODE); | |
1831 | established_upcall(ep); | |
1832 | c4iw_put_ep(&ep->com); | |
1833 | return 0; | |
1834 | err1: | |
1835 | ep->com.cm_id = NULL; | |
1836 | ep->com.qp = NULL; | |
1837 | cm_id->rem_ref(cm_id); | |
1838 | err: | |
1839 | c4iw_put_ep(&ep->com); | |
1840 | return err; | |
1841 | } | |
1842 | ||
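/*
 * iw_cm active-open handler: allocate an endpoint and an active TID,
 * resolve the route and L2T entry for the destination, then send the
 * connect request to the hardware.
 */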
1843 | int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |
1844 | { | |
1845 | int err = 0; | |
1846 | struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); | |
1847 | struct c4iw_ep *ep; | |
1848 | struct rtable *rt; | |
1849 | struct net_device *pdev; | |
69cce1d1 | 1850 | struct neighbour *neigh; |
cfdda9d7 SW |
1851 | int step; |
1852 | ||
be4c9bad RD |
1853 | if ((conn_param->ord > c4iw_max_read_depth) || |
1854 | (conn_param->ird > c4iw_max_read_depth)) { | |
1855 | err = -EINVAL; | |
1856 | goto out; | |
1857 | } | |
cfdda9d7 SW |
1858 | ep = alloc_ep(sizeof(*ep), GFP_KERNEL); |
1859 | if (!ep) { | |
1860 | printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); | |
1861 | err = -ENOMEM; | |
1862 | goto out; | |
1863 | } | |
1864 | init_timer(&ep->timer); | |
1865 | ep->plen = conn_param->private_data_len; | |
1866 | if (ep->plen) | |
1867 | memcpy(ep->mpa_pkt + sizeof(struct mpa_message), | |
1868 | conn_param->private_data, ep->plen); | |
1869 | ep->ird = conn_param->ird; | |
1870 | ep->ord = conn_param->ord; | |
1871 | ||
1872 | if (peer2peer && ep->ord == 0) | |
1873 | ep->ord = 1; | |
1874 | ||
1875 | cm_id->add_ref(cm_id); | |
1876 | ep->com.dev = dev; | |
1877 | ep->com.cm_id = cm_id; | |
1878 | ep->com.qp = get_qhp(dev, conn_param->qpn); | |
1879 | BUG_ON(!ep->com.qp); | |
1880 | PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, | |
1881 | ep->com.qp, cm_id); | |
1882 | ||
1883 | /* | |
1884 | * Allocate an active TID to initiate a TCP connection. | |
1885 | */ | |
1886 | ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); | |
1887 | if (ep->atid == -1) { | |
1888 | printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); | |
1889 | err = -ENOMEM; | |
1890 | goto fail2; | |
1891 | } | |
1892 | ||
1893 | PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__, | |
1894 | ntohl(cm_id->local_addr.sin_addr.s_addr), | |
1895 | ntohs(cm_id->local_addr.sin_port), | |
1896 | ntohl(cm_id->remote_addr.sin_addr.s_addr), | |
1897 | ntohs(cm_id->remote_addr.sin_port)); | |
1898 | ||
1899 | /* find a route */ | |
1900 | rt = find_route(dev, | |
1901 | cm_id->local_addr.sin_addr.s_addr, | |
1902 | cm_id->remote_addr.sin_addr.s_addr, | |
1903 | cm_id->local_addr.sin_port, | |
1904 | cm_id->remote_addr.sin_port, 0); | |
1905 | if (!rt) { | |
1906 | printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); | |
1907 | err = -EHOSTUNREACH; | |
1908 | goto fail3; | |
1909 | } | |
d8d1f30b | 1910 | ep->dst = &rt->dst; |
cfdda9d7 | 1911 | |
69cce1d1 DM |
1912 | neigh = dst_get_neighbour(ep->dst); |
1913 | ||
cfdda9d7 | 1914 | /* get a l2t entry */ |
69cce1d1 | 1915 | if (neigh->dev->flags & IFF_LOOPBACK) { |
cfdda9d7 SW |
1916 | PDBG("%s LOOPBACK\n", __func__); |
1917 | pdev = ip_dev_find(&init_net, | |
1918 | cm_id->remote_addr.sin_addr.s_addr); | |
1919 | ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, | |
69cce1d1 | 1920 | neigh, pdev, 0); |
cfdda9d7 SW |
1921 | ep->mtu = pdev->mtu; |
1922 | ep->tx_chan = cxgb4_port_chan(pdev); | |
2c5934bf | 1923 | ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; |
cfdda9d7 SW |
1924 | step = ep->com.dev->rdev.lldi.ntxq / |
1925 | ep->com.dev->rdev.lldi.nchan; | |
1926 | ep->txq_idx = cxgb4_port_idx(pdev) * step; | |
1927 | step = ep->com.dev->rdev.lldi.nrxq / | |
1928 | ep->com.dev->rdev.lldi.nchan; | |
d4f1a5c6 | 1929 | ep->ctrlq_idx = cxgb4_port_idx(pdev); |
cfdda9d7 SW |
1930 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ |
1931 | cxgb4_port_idx(pdev) * step]; | |
1932 | dev_put(pdev); | |
1933 | } else { | |
1934 | ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, | |
69cce1d1 | 1935 | neigh, neigh->dev, 0); |
cfdda9d7 | 1936 | ep->mtu = dst_mtu(ep->dst); |
69cce1d1 DM |
1937 | ep->tx_chan = cxgb4_port_chan(neigh->dev); |
1938 | ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1; | |
cfdda9d7 SW |
1939 | step = ep->com.dev->rdev.lldi.ntxq / |
1940 | ep->com.dev->rdev.lldi.nchan; | |
69cce1d1 DM |
1941 | ep->txq_idx = cxgb4_port_idx(neigh->dev) * step; |
1942 | ep->ctrlq_idx = cxgb4_port_idx(neigh->dev); | |
cfdda9d7 SW |
1943 | step = ep->com.dev->rdev.lldi.nrxq / |
1944 | ep->com.dev->rdev.lldi.nchan; | |
1945 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ | |
69cce1d1 | 1946 | cxgb4_port_idx(neigh->dev) * step]; |
cfdda9d7 SW |
1947 | } |
1948 | if (!ep->l2t) { | |
1949 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); | |
1950 | err = -ENOMEM; | |
1951 | goto fail4; | |
1952 | } | |
1953 | ||
1954 | PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", | |
1955 | __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, | |
1956 | ep->l2t->idx); | |
1957 | ||
1958 | state_set(&ep->com, CONNECTING); | |
1959 | ep->tos = 0; | |
1960 | ep->com.local_addr = cm_id->local_addr; | |
1961 | ep->com.remote_addr = cm_id->remote_addr; | |
1962 | ||
1963 | /* send connect request to rnic */ | |
1964 | err = send_connect(ep); | |
1965 | if (!err) | |
1966 | goto out; | |
1967 | ||
1968 | cxgb4_l2t_release(ep->l2t); | |
1969 | fail4: | |
1970 | dst_release(ep->dst); | |
1971 | fail3: | |
1972 | cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); | |
1973 | fail2: | |
1974 | cm_id->rem_ref(cm_id); | |
1975 | c4iw_put_ep(&ep->com); | |
1976 | out: | |
1977 | return err; | |
1978 | } | |
1979 | ||
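/*
 * iw_cm listen handler: allocate a listening endpoint and a server TID,
 * ask the hardware to create the server and wait for the PASS_OPEN
 * reply before returning.
 */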
1980 | int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) | |
1981 | { | |
1982 | int err = 0; | |
1983 | struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); | |
1984 | struct c4iw_listen_ep *ep; | |
1985 | ||
1986 | ||
1987 | might_sleep(); | |
1988 | ||
1989 | ep = alloc_ep(sizeof(*ep), GFP_KERNEL); | |
1990 | if (!ep) { | |
1991 | printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); | |
1992 | err = -ENOMEM; | |
1993 | goto fail1; | |
1994 | } | |
1995 | PDBG("%s ep %p\n", __func__, ep); | |
1996 | cm_id->add_ref(cm_id); | |
1997 | ep->com.cm_id = cm_id; | |
1998 | ep->com.dev = dev; | |
1999 | ep->backlog = backlog; | |
2000 | ep->com.local_addr = cm_id->local_addr; | |
2001 | ||
2002 | /* | |
2003 | * Allocate a server TID. | |
2004 | */ | |
2005 | ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); | |
2006 | if (ep->stid == -1) { | |
be4c9bad | 2007 | printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); |
cfdda9d7 SW |
2008 | err = -ENOMEM; |
2009 | goto fail2; | |
2010 | } | |
2011 | ||
2012 | state_set(&ep->com, LISTEN); | |
aadc4df3 | 2013 | c4iw_init_wr_wait(&ep->com.wr_wait); |
cfdda9d7 SW |
2014 | err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid, |
2015 | ep->com.local_addr.sin_addr.s_addr, | |
2016 | ep->com.local_addr.sin_port, | |
2017 | ep->com.dev->rdev.lldi.rxq_ids[0]); | |
2018 | if (err) | |
2019 | goto fail3; | |
2020 | ||
2021 | /* wait for pass_open_rpl */ | |
aadc4df3 SW |
2022 | err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, |
2023 | __func__); | |
cfdda9d7 SW |
2024 | if (!err) { |
2025 | cm_id->provider_data = ep; | |
2026 | goto out; | |
2027 | } | |
2028 | fail3: | |
2029 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); | |
2030 | fail2: | |
2031 | cm_id->rem_ref(cm_id); | |
2032 | c4iw_put_ep(&ep->com); | |
2033 | fail1: | |
2034 | out: | |
2035 | return err; | |
2036 | } | |
2037 | ||
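/*
 * iw_cm listen teardown: stop the hardware server, wait for the
 * CLOSE_LISTSRV reply and free the server TID.
 */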
2038 | int c4iw_destroy_listen(struct iw_cm_id *cm_id) | |
2039 | { | |
2040 | int err; | |
2041 | struct c4iw_listen_ep *ep = to_listen_ep(cm_id); | |
2042 | ||
2043 | PDBG("%s ep %p\n", __func__, ep); | |
2044 | ||
2045 | might_sleep(); | |
2046 | state_set(&ep->com, DEAD); | |
aadc4df3 | 2047 | c4iw_init_wr_wait(&ep->com.wr_wait); |
cfdda9d7 SW |
2048 | err = listen_stop(ep); |
2049 | if (err) | |
2050 | goto done; | |
aadc4df3 SW |
2051 | err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, |
2052 | __func__); | |
cfdda9d7 SW |
2053 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); |
2054 | done: | |
cfdda9d7 SW |
2055 | cm_id->rem_ref(cm_id); |
2056 | c4iw_put_ep(&ep->com); | |
2057 | return err; | |
2058 | } | |
2059 | ||
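/*
 * Begin tearing down a connection: an abrupt disconnect sends an ABORT,
 * a graceful one starts the half-close handshake.  If the adapter is in
 * a fatal error state the endpoint is marked DEAD and its resources are
 * released immediately.
 */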
2060 | int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) | |
2061 | { | |
2062 | int ret = 0; | |
cfdda9d7 SW |
2063 | int close = 0; |
2064 | int fatal = 0; | |
2065 | struct c4iw_rdev *rdev; | |
cfdda9d7 | 2066 | |
2f5b48c3 | 2067 | mutex_lock(&ep->com.mutex); |
cfdda9d7 SW |
2068 | |
2069 | PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, | |
2070 | states[ep->com.state], abrupt); | |
2071 | ||
2072 | rdev = &ep->com.dev->rdev; | |
2073 | if (c4iw_fatal_error(rdev)) { | |
2074 | fatal = 1; | |
2075 | close_complete_upcall(ep); | |
2076 | ep->com.state = DEAD; | |
2077 | } | |
2078 | switch (ep->com.state) { | |
2079 | case MPA_REQ_WAIT: | |
2080 | case MPA_REQ_SENT: | |
2081 | case MPA_REQ_RCVD: | |
2082 | case MPA_REP_SENT: | |
2083 | case FPDU_MODE: | |
2084 | close = 1; | |
2085 | if (abrupt) | |
2086 | ep->com.state = ABORTING; | |
2087 | else { | |
2088 | ep->com.state = CLOSING; | |
ca5a2202 | 2089 | start_ep_timer(ep); |
cfdda9d7 SW |
2090 | } |
2091 | set_bit(CLOSE_SENT, &ep->com.flags); | |
2092 | break; | |
2093 | case CLOSING: | |
2094 | if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { | |
2095 | close = 1; | |
2096 | if (abrupt) { | |
ca5a2202 | 2097 | stop_ep_timer(ep); |
cfdda9d7 SW |
2098 | ep->com.state = ABORTING; |
2099 | } else | |
2100 | ep->com.state = MORIBUND; | |
2101 | } | |
2102 | break; | |
2103 | case MORIBUND: | |
2104 | case ABORTING: | |
2105 | case DEAD: | |
2106 | PDBG("%s ignoring disconnect ep %p state %u\n", | |
2107 | __func__, ep, ep->com.state); | |
2108 | break; | |
2109 | default: | |
2110 | BUG(); | |
2111 | break; | |
2112 | } | |
2113 | ||
cfdda9d7 | 2114 | if (close) { |
8da7e7a5 SW |
2115 | if (abrupt) { |
2116 | close_complete_upcall(ep); | |
2117 | ret = send_abort(ep, NULL, gfp); | |
2118 | } else | |
cfdda9d7 SW |
2119 | ret = send_halfclose(ep, gfp); |
2120 | if (ret) | |
2121 | fatal = 1; | |
2122 | } | |
8da7e7a5 | 2123 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
2124 | if (fatal) |
2125 | release_ep_resources(ep); | |
2126 | return ret; | |
2127 | } | |
2128 | ||
2f5b48c3 SW |
2129 | static int async_event(struct c4iw_dev *dev, struct sk_buff *skb) |
2130 | { | |
2131 | struct cpl_fw6_msg *rpl = cplhdr(skb); | |
2132 | c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); | |
2133 | return 0; | |
2134 | } | |
2135 | ||
be4c9bad RD |
2136 | /* |
2137 | * These are the real handlers that are called from a | |
2138 | * work queue. | |
2139 | */ | |
2140 | static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { | |
2141 | [CPL_ACT_ESTABLISH] = act_establish, | |
2142 | [CPL_ACT_OPEN_RPL] = act_open_rpl, | |
2143 | [CPL_RX_DATA] = rx_data, | |
2144 | [CPL_ABORT_RPL_RSS] = abort_rpl, | |
2145 | [CPL_ABORT_RPL] = abort_rpl, | |
2146 | [CPL_PASS_OPEN_RPL] = pass_open_rpl, | |
2147 | [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, | |
2148 | [CPL_PASS_ACCEPT_REQ] = pass_accept_req, | |
2149 | [CPL_PASS_ESTABLISH] = pass_establish, | |
2150 | [CPL_PEER_CLOSE] = peer_close, | |
2151 | [CPL_ABORT_REQ_RSS] = peer_abort, | |
2152 | [CPL_CLOSE_CON_RPL] = close_con_rpl, | |
2153 | [CPL_RDMA_TERMINATE] = terminate, | |
2f5b48c3 SW |
2154 | [CPL_FW4_ACK] = fw4_ack, |
2155 | [CPL_FW6_MSG] = async_event | |
be4c9bad RD |
2156 | }; |
2157 | ||
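/*
 * An endpoint timer has expired (run from the work queue, see
 * ep_timeout() below): abort the connection unless the endpoint is in a
 * state where the timer should not have been running.
 */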
2158 | static void process_timeout(struct c4iw_ep *ep) | |
2159 | { | |
2160 | struct c4iw_qp_attributes attrs; | |
2161 | int abort = 1; | |
2162 | ||
2f5b48c3 | 2163 | mutex_lock(&ep->com.mutex); |
be4c9bad RD |
2164 | PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, |
2165 | ep->com.state); | |
2166 | switch (ep->com.state) { | |
2167 | case MPA_REQ_SENT: | |
2168 | __state_set(&ep->com, ABORTING); | |
2169 | connect_reply_upcall(ep, -ETIMEDOUT); | |
2170 | break; | |
2171 | case MPA_REQ_WAIT: | |
2172 | __state_set(&ep->com, ABORTING); | |
2173 | break; | |
2174 | case CLOSING: | |
2175 | case MORIBUND: | |
2176 | if (ep->com.cm_id && ep->com.qp) { | |
2177 | attrs.next_state = C4IW_QP_STATE_ERROR; | |
2178 | c4iw_modify_qp(ep->com.qp->rhp, | |
2179 | ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, | |
2180 | &attrs, 1); | |
2181 | } | |
2182 | __state_set(&ep->com, ABORTING); | |
2183 | break; | |
2184 | default: | |
2185 | printk(KERN_ERR MOD "%s unexpected state ep %p tid %u state %u\n", | |
2186 | __func__, ep, ep->hwtid, ep->com.state); | |
2187 | WARN_ON(1); | |
2188 | abort = 0; | |
2189 | } | |
2f5b48c3 | 2190 | mutex_unlock(&ep->com.mutex); |
be4c9bad RD |
2191 | if (abort) |
2192 | abort_connection(ep, NULL, GFP_KERNEL); | |
2193 | c4iw_put_ep(&ep->com); | |
2194 | } | |
2195 | ||
2196 | static void process_timedout_eps(void) | |
2197 | { | |
2198 | struct c4iw_ep *ep; | |
2199 | ||
2200 | spin_lock_irq(&timeout_lock); | |
2201 | while (!list_empty(&timeout_list)) { | |
2202 | struct list_head *tmp; | |
2203 | ||
2204 | tmp = timeout_list.next; | |
2205 | list_del(tmp); | |
2206 | spin_unlock_irq(&timeout_lock); | |
2207 | ep = list_entry(tmp, struct c4iw_ep, entry); | |
2208 | process_timeout(ep); | |
2209 | spin_lock_irq(&timeout_lock); | |
2210 | } | |
2211 | spin_unlock_irq(&timeout_lock); | |
2212 | } | |
2213 | ||
2214 | static void process_work(struct work_struct *work) | |
2215 | { | |
2216 | struct sk_buff *skb = NULL; | |
2217 | struct c4iw_dev *dev; | |
c1d7356c | 2218 | struct cpl_act_establish *rpl; |
be4c9bad RD |
2219 | unsigned int opcode; |
2220 | int ret; | |
2221 | ||
2222 | while ((skb = skb_dequeue(&rxq))) { | |
2223 | rpl = cplhdr(skb); | |
2224 | dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); | |
2225 | opcode = rpl->ot.opcode; | |
2226 | ||
2227 | BUG_ON(!work_handlers[opcode]); | |
2228 | ret = work_handlers[opcode](dev, skb); | |
2229 | if (!ret) | |
2230 | kfree_skb(skb); | |
2231 | } | |
2232 | process_timedout_eps(); | |
2233 | } | |
2234 | ||
2235 | static DECLARE_WORK(skb_work, process_work); | |
2236 | ||
2237 | static void ep_timeout(unsigned long arg) | |
2238 | { | |
2239 | struct c4iw_ep *ep = (struct c4iw_ep *)arg; | |
2240 | ||
2241 | spin_lock(&timeout_lock); | |
2242 | list_add_tail(&ep->entry, &timeout_list); | |
2243 | spin_unlock(&timeout_lock); | |
2244 | queue_work(workq, &skb_work); | |
2245 | } | |
2246 | ||
cfdda9d7 SW |
2247 | /* |
2248 | * All the CM events are handled on a work queue to have a safe context. | |
2249 | */ | |
2250 | static int sched(struct c4iw_dev *dev, struct sk_buff *skb) | |
2251 | { | |
2252 | ||
2253 | /* | |
2254 | * Save dev in the skb->cb area. | |
2255 | */ | |
2256 | *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; | |
2257 | ||
2258 | /* | |
2259 | * Queue the skb and schedule the worker thread. | |
2260 | */ | |
2261 | skb_queue_tail(&rxq, skb); | |
2262 | queue_work(workq, &skb_work); | |
2263 | return 0; | |
2264 | } | |
2265 | ||
2266 | static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
2267 | { | |
2268 | struct cpl_set_tcb_rpl *rpl = cplhdr(skb); | |
2269 | ||
2270 | if (rpl->status != CPL_ERR_NONE) { | |
2271 | printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " | |
2272 | "for tid %u\n", rpl->status, GET_TID(rpl)); | |
2273 | } | |
2f5b48c3 | 2274 | kfree_skb(skb); |
cfdda9d7 SW |
2275 | return 0; |
2276 | } | |
2277 | ||
be4c9bad RD |
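/*
 * Handle CPL_FW6_MSG: type 1 completes a firmware work-request wait,
 * type 2 carries an async event and is deferred to the work queue.
 */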
2278 | static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) |
2279 | { | |
2280 | struct cpl_fw6_msg *rpl = cplhdr(skb); | |
2281 | struct c4iw_wr_wait *wr_waitp; | |
2282 | int ret; | |
2283 | ||
2284 | PDBG("%s type %u\n", __func__, rpl->type); | |
2285 | ||
2286 | switch (rpl->type) { | |
2287 | case 1: | |
2288 | ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); | |
c8e081a1 | 2289 | wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; |
be4c9bad | 2290 | PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); |
d9594d99 SW |
2291 | if (wr_waitp) |
2292 | c4iw_wake_up(wr_waitp, ret ? -ret : 0); | |
2f5b48c3 | 2293 | kfree_skb(skb); |
be4c9bad RD |
2294 | break; |
2295 | case 2: | |
2f5b48c3 | 2296 | sched(dev, skb); |
be4c9bad RD |
2297 | break; |
2298 | default: | |
2299 | printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, | |
2300 | rpl->type); | |
2f5b48c3 | 2301 | kfree_skb(skb); |
be4c9bad RD |
2302 | break; |
2303 | } | |
2304 | return 0; | |
2305 | } | |
2306 | ||
8da7e7a5 SW |
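/*
 * Handle CPL_ABORT_REQ_RSS before it is deferred to the work queue: wake
 * any threads blocked in rdma_init()/rdma_fini() immediately, then hand
 * the skb to sched() so the full abort processing happens in peer_abort().
 */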
2307 | static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) |
2308 | { | |
2309 | struct cpl_abort_req_rss *req = cplhdr(skb); | |
2310 | struct c4iw_ep *ep; | |
2311 | struct tid_info *t = dev->rdev.lldi.tids; | |
2312 | unsigned int tid = GET_TID(req); | |
2313 | ||
2314 | ep = lookup_tid(t, tid); | |
2315 | if (is_neg_adv_abort(req->status)) { | |
2316 | PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, | |
2317 | ep->hwtid); | |
2318 | kfree_skb(skb); | |
2319 | return 0; | |
2320 | } | |
2321 | PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, | |
2322 | ep->com.state); | |
2323 | ||
2324 | /* | |
2325 | * Wake up any threads in rdma_init() or rdma_fini(). | |
2326 | */ | |
2327 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); | |
2328 | sched(dev, skb); | |
2329 | return 0; | |
2330 | } | |
2331 | ||
be4c9bad RD |
2332 | /* |
2333 | * Most upcalls from the T4 Core go to sched() to | |
2334 | * schedule the processing on a work queue. | |
2335 | */ | |
2336 | c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { | |
2337 | [CPL_ACT_ESTABLISH] = sched, | |
2338 | [CPL_ACT_OPEN_RPL] = sched, | |
2339 | [CPL_RX_DATA] = sched, | |
2340 | [CPL_ABORT_RPL_RSS] = sched, | |
2341 | [CPL_ABORT_RPL] = sched, | |
2342 | [CPL_PASS_OPEN_RPL] = sched, | |
2343 | [CPL_CLOSE_LISTSRV_RPL] = sched, | |
2344 | [CPL_PASS_ACCEPT_REQ] = sched, | |
2345 | [CPL_PASS_ESTABLISH] = sched, | |
2346 | [CPL_PEER_CLOSE] = sched, | |
2347 | [CPL_CLOSE_CON_RPL] = sched, | |
8da7e7a5 | 2348 | [CPL_ABORT_REQ_RSS] = peer_abort_intr, |
be4c9bad RD |
2349 | [CPL_RDMA_TERMINATE] = sched, |
2350 | [CPL_FW4_ACK] = sched, | |
2351 | [CPL_SET_TCB_RPL] = set_tcb_rpl, | |
2352 | [CPL_FW6_MSG] = fw6_msg | |
2353 | }; | |
2354 | ||
cfdda9d7 SW |
2355 | int __init c4iw_cm_init(void) |
2356 | { | |
be4c9bad | 2357 | spin_lock_init(&timeout_lock); |
cfdda9d7 SW |
2358 | skb_queue_head_init(&rxq); |
2359 | ||
2360 | workq = create_singlethread_workqueue("iw_cxgb4"); | |
2361 | if (!workq) | |
2362 | return -ENOMEM; | |
2363 | ||
cfdda9d7 SW |
2364 | return 0; |
2365 | } | |
2366 | ||
2367 | void __exit c4iw_cm_term(void) | |
2368 | { | |
be4c9bad | 2369 | WARN_ON(!list_empty(&timeout_list)); |
cfdda9d7 SW |
2370 | flush_workqueue(workq); |
2371 | destroy_workqueue(workq); | |
2372 | } |