]>
Commit | Line | Data |
---|---|---|
f27b4746 FL |
1 | /******************************************************************************* |
2 | * | |
3 | * Copyright (c) 2015-2016 Intel Corporation. All rights reserved. | |
4 | * | |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the | |
9 | * OpenFabrics.org BSD license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or | |
12 | * without modification, are permitted provided that the following | |
13 | * conditions are met: | |
14 | * | |
15 | * - Redistributions of source code must retain the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above | |
20 | * copyright notice, this list of conditions and the following | |
21 | * disclaimer in the documentation and/or other materials | |
22 | * provided with the distribution. | |
23 | * | |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
31 | * SOFTWARE. | |
32 | * | |
33 | *******************************************************************************/ | |
34 | ||
35 | #include <linux/atomic.h> | |
36 | #include <linux/ip.h> | |
37 | #include <linux/tcp.h> | |
38 | #include <linux/init.h> | |
39 | #include <linux/if_arp.h> | |
40 | #include <linux/if_vlan.h> | |
41 | #include <linux/notifier.h> | |
42 | #include <linux/net.h> | |
43 | #include <linux/types.h> | |
44 | #include <linux/timer.h> | |
45 | #include <linux/time.h> | |
46 | #include <linux/delay.h> | |
47 | #include <linux/etherdevice.h> | |
48 | #include <linux/netdevice.h> | |
49 | #include <linux/random.h> | |
50 | #include <linux/list.h> | |
51 | #include <linux/threads.h> | |
52 | #include <linux/highmem.h> | |
53 | #include <net/arp.h> | |
54 | #include <net/ndisc.h> | |
55 | #include <net/neighbour.h> | |
56 | #include <net/route.h> | |
57 | #include <net/addrconf.h> | |
58 | #include <net/ip6_route.h> | |
59 | #include <net/ip_fib.h> | |
60 | #include <net/tcp.h> | |
61 | #include <asm/checksum.h> | |
62 | ||
63 | #include "i40iw.h" | |
64 | ||
65 | static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *); | |
66 | static void i40iw_cm_post_event(struct i40iw_cm_event *event); | |
67 | static void i40iw_disconnect_worker(struct work_struct *work); | |
68 | ||
69 | /** | |
70 | * i40iw_free_sqbuf - put back puda buffer if refcount = 0 | |
71 | * @dev: FPK device | |
72 | * @buf: puda buffer to free | |
73 | */ | |
74 | void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp) | |
75 | { | |
76 | struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp; | |
77 | struct i40iw_puda_rsrc *ilq = dev->ilq; | |
78 | ||
79 | if (!atomic_dec_return(&buf->refcount)) | |
80 | i40iw_puda_ret_bufpool(ilq, buf); | |
81 | } | |
82 | ||
83 | /** | |
84 | * i40iw_derive_hw_ird_setting - Calculate IRD | |
85 | * | |
86 | * @cm_ird: IRD of connection's node | |
87 | * | |
88 | * The ird from the connection is rounded to a supported HW | |
89 | * setting (2,8,32,64) and then encoded for ird_size field of | |
90 | * qp_ctx | |
91 | */ | |
92 | static u8 i40iw_derive_hw_ird_setting(u16 cm_ird) | |
93 | { | |
94 | u8 encoded_ird_size; | |
95 | u8 pof2_cm_ird = 1; | |
96 | ||
97 | /* round-off to next powerof2 */ | |
98 | while (pof2_cm_ird < cm_ird) | |
99 | pof2_cm_ird *= 2; | |
100 | ||
101 | /* ird_size field is encoded in qp_ctx */ | |
102 | switch (pof2_cm_ird) { | |
103 | case I40IW_HW_IRD_SETTING_64: | |
104 | encoded_ird_size = 3; | |
105 | break; | |
106 | case I40IW_HW_IRD_SETTING_32: | |
107 | case I40IW_HW_IRD_SETTING_16: | |
108 | encoded_ird_size = 2; | |
109 | break; | |
110 | case I40IW_HW_IRD_SETTING_8: | |
111 | case I40IW_HW_IRD_SETTING_4: | |
112 | encoded_ird_size = 1; | |
113 | break; | |
114 | case I40IW_HW_IRD_SETTING_2: | |
115 | default: | |
116 | encoded_ird_size = 0; | |
117 | break; | |
118 | } | |
119 | return encoded_ird_size; | |
120 | } | |
121 | ||
122 | /** | |
123 | * i40iw_record_ird_ord - Record IRD/ORD passed in | |
124 | * @cm_node: connection's node | |
125 | * @conn_ird: connection IRD | |
126 | * @conn_ord: connection ORD | |
127 | */ | |
128 | static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord) | |
129 | { | |
130 | if (conn_ird > I40IW_MAX_IRD_SIZE) | |
131 | conn_ird = I40IW_MAX_IRD_SIZE; | |
132 | ||
133 | if (conn_ord > I40IW_MAX_ORD_SIZE) | |
134 | conn_ord = I40IW_MAX_ORD_SIZE; | |
135 | ||
136 | cm_node->ird_size = conn_ird; | |
137 | cm_node->ord_size = conn_ord; | |
138 | } | |
139 | ||
140 | /** | |
141 | * i40iw_copy_ip_ntohl - change network to host ip | |
142 | * @dst: host ip | |
143 | * @src: big endian | |
144 | */ | |
145 | void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src) | |
146 | { | |
147 | *dst++ = ntohl(*src++); | |
148 | *dst++ = ntohl(*src++); | |
149 | *dst++ = ntohl(*src++); | |
150 | *dst = ntohl(*src); | |
151 | } | |
152 | ||
153 | /** | |
154 | * i40iw_copy_ip_htonl - change host addr to network ip | |
155 | * @dst: host ip | |
156 | * @src: little endian | |
157 | */ | |
158 | static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src) | |
159 | { | |
160 | *dst++ = htonl(*src++); | |
161 | *dst++ = htonl(*src++); | |
162 | *dst++ = htonl(*src++); | |
163 | *dst = htonl(*src); | |
164 | } | |
165 | ||
166 | /** | |
167 | * i40iw_fill_sockaddr4 - get addr info for passive connection | |
168 | * @cm_node: connection's node | |
169 | * @event: upper layer's cm event | |
170 | */ | |
171 | static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node, | |
172 | struct iw_cm_event *event) | |
173 | { | |
174 | struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr; | |
175 | struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr; | |
176 | ||
177 | laddr->sin_family = AF_INET; | |
178 | raddr->sin_family = AF_INET; | |
179 | ||
180 | laddr->sin_port = htons(cm_node->loc_port); | |
181 | raddr->sin_port = htons(cm_node->rem_port); | |
182 | ||
183 | laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]); | |
184 | raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]); | |
185 | } | |
186 | ||
187 | /** | |
188 | * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side | |
189 | * @cm_node: connection's node | |
190 | * @event: upper layer's cm event | |
191 | */ | |
192 | static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node, | |
193 | struct iw_cm_event *event) | |
194 | { | |
195 | struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr; | |
196 | struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr; | |
197 | ||
198 | laddr6->sin6_family = AF_INET6; | |
199 | raddr6->sin6_family = AF_INET6; | |
200 | ||
201 | laddr6->sin6_port = htons(cm_node->loc_port); | |
202 | raddr6->sin6_port = htons(cm_node->rem_port); | |
203 | ||
204 | i40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32, | |
205 | cm_node->loc_addr); | |
206 | i40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32, | |
207 | cm_node->rem_addr); | |
208 | } | |
209 | ||
f27b4746 FL |
210 | /** |
211 | * i40iw_get_addr_info | |
212 | * @cm_node: contains ip/tcp info | |
213 | * @cm_info: to get a copy of the cm_node ip/tcp info | |
214 | */ | |
215 | static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node, | |
216 | struct i40iw_cm_info *cm_info) | |
217 | { | |
218 | cm_info->ipv4 = cm_node->ipv4; | |
219 | cm_info->vlan_id = cm_node->vlan_id; | |
220 | memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr)); | |
221 | memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr)); | |
f27b4746 FL |
222 | cm_info->loc_port = cm_node->loc_port; |
223 | cm_info->rem_port = cm_node->rem_port; | |
0fc2dc58 | 224 | cm_info->user_pri = cm_node->user_pri; |
f27b4746 FL |
225 | } |
226 | ||
/**
 * i40iw_get_cmevent_info - for cm event upcall
 * @cm_node: connection's node (may be NULL; address info is still copied)
 * @cm_id: upper layers cm struct for the event
 * @event: upper layer's cm event
 *
 * Copies the cm_id's (mapped) local/remote socket addresses into the
 * event and, when a node is supplied, its private data and IRD/ORD.
 */
static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node,
					  struct iw_cm_id *cm_id,
					  struct iw_cm_event *event)
{
	memcpy(&event->local_addr, &cm_id->m_local_addr,
	       sizeof(event->local_addr));
	memcpy(&event->remote_addr, &cm_id->m_remote_addr,
	       sizeof(event->remote_addr));
	if (cm_node) {
		event->private_data = (void *)cm_node->pdata_buf;
		event->private_data_len = (u8)cm_node->pdata.size;
		event->ird = cm_node->ird_size;
		event->ord = cm_node->ord_size;
	}
}
248 | ||
/**
 * i40iw_send_cm_event - upcall cm's event handler
 * @cm_node: connection's node
 * @cm_id: upper layer's cm info struct
 * @type: Event type to indicate
 * @status: status for the event type
 *
 * Builds an iw_cm_event for @type and synchronously invokes the upper
 * layer's event_handler.  Returns the handler's return value, or -1
 * for an unrecognized event type.
 */
static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
			       struct iw_cm_id *cm_id,
			       enum iw_cm_event_type type,
			       int status)
{
	struct iw_cm_event event;

	memset(&event, 0, sizeof(event));
	event.event = type;
	event.status = status;
	switch (type) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		/* passive side: report peer addresses plus MPA private data */
		if (cm_node->ipv4)
			i40iw_fill_sockaddr4(cm_node, &event);
		else
			i40iw_fill_sockaddr6(cm_node, &event);
		event.provider_data = (void *)cm_node;
		event.private_data = (void *)cm_node->pdata_buf;
		event.private_data_len = (u8)cm_node->pdata.size;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		i40iw_get_cmevent_info(cm_node, cm_id, &event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.ird = cm_node->ird_size;
		event.ord = cm_node->ord_size;
		break;
	case IW_CM_EVENT_DISCONNECT:
		break;
	case IW_CM_EVENT_CLOSE:
		/* nothing beyond event/status for disconnect and close */
		break;
	default:
		i40iw_pr_err("event type received type = %d\n", type);
		return -1;
	}
	return cm_id->event_handler(cm_id, &event);
}
293 | ||
/**
 * i40iw_create_event - create cm event
 * @cm_node: connection's node
 * @type: Event type to generate
 *
 * Allocates an i40iw_cm_event (GFP_ATOMIC), snapshots the node's
 * address/port info into it, and posts it via i40iw_cm_post_event().
 * Returns the event, or NULL if the node has no cm_id or allocation
 * fails.  Ownership of the event passes to the post-event path.
 */
static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
						 enum i40iw_cm_event_type type)
{
	struct i40iw_cm_event *event;

	/* no upper-layer id means nobody to deliver the event to */
	if (!cm_node->cm_id)
		return NULL;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);

	if (!event)
		return NULL;

	event->type = type;
	event->cm_node = cm_node;
	memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr));
	memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr));
	event->cm_info.rem_port = cm_node->rem_port;
	event->cm_info.loc_port = cm_node->loc_port;
	event->cm_info.cm_id = cm_node->cm_id;

	/* NOTE(review): %pI4 is used even when the node is IPv6 —
	 * debug output only, but confirm this is intended.
	 */
	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
		    cm_node,
		    event,
		    type,
		    event->cm_info.loc_addr,
		    event->cm_info.rem_addr);

	i40iw_cm_post_event(event);
	return event;
}
332 | ||
333 | /** | |
334 | * i40iw_free_retrans_entry - free send entry | |
335 | * @cm_node: connection's node | |
336 | */ | |
337 | static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node) | |
338 | { | |
339 | struct i40iw_sc_dev *dev = cm_node->dev; | |
340 | struct i40iw_timer_entry *send_entry; | |
341 | ||
342 | send_entry = cm_node->send_entry; | |
343 | if (send_entry) { | |
344 | cm_node->send_entry = NULL; | |
345 | i40iw_free_sqbuf(dev, (void *)send_entry->sqbuf); | |
346 | kfree(send_entry); | |
347 | atomic_dec(&cm_node->ref_count); | |
348 | } | |
349 | } | |
350 | ||
/**
 * i40iw_cleanup_retrans_entry - free send entry with lock
 * @cm_node: connection's node
 *
 * Locked wrapper around i40iw_free_retrans_entry(); takes
 * retrans_list_lock with IRQs disabled for the duration.
 */
static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
	i40iw_free_retrans_entry(cm_node);
	spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
}
363 | ||
/**
 * i40iw_form_cm_frame - get a free packet and build frame
 * @cm_node: connection's node info to use in frame
 * @options: pointer to options info (TCP options), may be NULL
 * @hdr: pointer mpa header, may be NULL
 * @pdata: pointer to private data, may be NULL
 * @flags: SET_ACK/SET_SYN/SET_FIN/SET_RST combination for the TCP header
 *
 * Builds a complete Ethernet(+optional VLAN)/IPv4-or-IPv6/TCP frame in
 * a buffer taken from the device's ilq pool, appending TCP options, the
 * MPA header and private data after the TCP header.  Also advances the
 * node's local sequence number according to @flags and payload length.
 * Returns the sqbuf with refcount set to 1, or NULL if the pool is empty.
 */
static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
						  struct i40iw_kmem_info *options,
						  struct i40iw_kmem_info *hdr,
						  struct i40iw_kmem_info *pdata,
						  u8 flags)
{
	struct i40iw_puda_buf *sqbuf;
	struct i40iw_sc_dev *dev = cm_node->dev;
	u8 *buf;

	struct tcphdr *tcph;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct ethhdr *ethh;
	u16 packetsize;
	u16 eth_hlen = ETH_HLEN;
	u32 opts_len = 0;
	u32 pd_len = 0;
	u32 hdr_len = 0;
	u16 vtag;

	sqbuf = i40iw_puda_get_bufpool(dev->ilq);
	if (!sqbuf)
		return NULL;
	buf = sqbuf->mem.va;

	if (options)
		opts_len = (u32)options->size;

	if (hdr)
		hdr_len = hdr->size;

	if (pdata)
		pd_len = pdata->size;

	/* a valid vlan_id (< VLAN_TAG_PRESENT) means a 4-byte 802.1Q tag */
	if (cm_node->vlan_id < VLAN_TAG_PRESENT)
		eth_hlen += 4;

	if (cm_node->ipv4)
		packetsize = sizeof(*iph) + sizeof(*tcph);
	else
		packetsize = sizeof(*ip6h) + sizeof(*tcph);
	packetsize += opts_len + hdr_len + pd_len;

	memset(buf, 0x00, eth_hlen + packetsize);

	sqbuf->totallen = packetsize + eth_hlen;
	sqbuf->maclen = eth_hlen;
	sqbuf->tcphlen = sizeof(*tcph) + opts_len;
	sqbuf->scratch = (void *)cm_node;

	ethh = (struct ethhdr *)buf;
	buf += eth_hlen;

	if (cm_node->ipv4) {
		sqbuf->ipv4 = true;

		iph = (struct iphdr *)buf;
		buf += sizeof(*iph);
		tcph = (struct tcphdr *)buf;
		buf += sizeof(*tcph);

		ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
			/* tagged frame: 802.1Q header with priority + vlan id */
			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);

			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
		} else {
			ethh->h_proto = htons(ETH_P_IP);
		}

		iph->version = IPVERSION;
		iph->ihl = 5;	/* 5 * 4Byte words, IP header len */
		iph->tos = cm_node->tos;
		iph->tot_len = htons(packetsize);
		iph->id = htons(++cm_node->tcp_cntxt.loc_id);

		iph->frag_off = htons(0x4000);	/* don't-fragment bit */
		iph->ttl = 0x40;
		iph->protocol = IPPROTO_TCP;
		iph->saddr = htonl(cm_node->loc_addr[0]);
		iph->daddr = htonl(cm_node->rem_addr[0]);
	} else {
		sqbuf->ipv4 = false;
		ip6h = (struct ipv6hdr *)buf;
		buf += sizeof(*ip6h);
		tcph = (struct tcphdr *)buf;
		buf += sizeof(*tcph);

		ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
		} else {
			ethh->h_proto = htons(ETH_P_IPV6);
		}
		ip6h->version = 6;
		/* split the 8-bit TOS across traffic-class nibbles */
		ip6h->priority = cm_node->tos >> 4;
		ip6h->flow_lbl[0] = cm_node->tos << 4;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;
		ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
		ip6h->nexthdr = 6;	/* TCP */
		ip6h->hop_limit = 128;
		i40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
				    cm_node->loc_addr);
		i40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
				    cm_node->rem_addr);
	}

	tcph->source = htons(cm_node->loc_port);
	tcph->dest = htons(cm_node->rem_port);

	tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);

	if (flags & SET_ACK) {
		cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
		tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
		tcph->ack = 1;
	} else {
		tcph->ack_seq = 0;
	}

	if (flags & SET_SYN) {
		/* SYN consumes one sequence number */
		cm_node->tcp_cntxt.loc_seq_num++;
		tcph->syn = 1;
	} else {
		/* data (MPA hdr + private data) advances the sequence */
		cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
	}

	if (flags & SET_FIN) {
		/* FIN also consumes one sequence number */
		cm_node->tcp_cntxt.loc_seq_num++;
		tcph->fin = 1;
	}

	if (flags & SET_RST)
		tcph->rst = 1;

	/* data offset in 32-bit words, rounded up to cover the options */
	tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
	sqbuf->tcphlen = tcph->doff << 2;
	tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
	tcph->urg_ptr = 0;

	if (opts_len) {
		memcpy(buf, options->addr, opts_len);
		buf += opts_len;
	}

	if (hdr_len) {
		memcpy(buf, hdr->addr, hdr_len);
		buf += hdr_len;
	}

	if (pdata && pdata->addr)
		memcpy(buf, pdata->addr, pdata->size);

	atomic_set(&sqbuf->refcount, 1);

	return sqbuf;
}
538 | ||
539 | /** | |
540 | * i40iw_send_reset - Send RST packet | |
541 | * @cm_node: connection's node | |
542 | */ | |
543 | static int i40iw_send_reset(struct i40iw_cm_node *cm_node) | |
544 | { | |
545 | struct i40iw_puda_buf *sqbuf; | |
546 | int flags = SET_RST | SET_ACK; | |
547 | ||
548 | sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags); | |
549 | if (!sqbuf) { | |
550 | i40iw_pr_err("no sqbuf\n"); | |
551 | return -1; | |
552 | } | |
553 | ||
554 | return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1); | |
555 | } | |
556 | ||
/**
 * i40iw_active_open_err - send event for active side cm error
 * @cm_node: connection's node
 * @reset: Flag to send reset or not
 *
 * Cleans up any pending retransmit entry, bumps the connect-error
 * counter, optionally sends a TCP RST, moves the node to CLOSED and
 * posts an ABORTED event.  The extra reference taken before
 * i40iw_send_reset() presumably keeps the node alive through the
 * reset/timer path -- confirm against i40iw_rem_ref_cm_node() usage.
 */
static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset)
{
	i40iw_cleanup_retrans_entry(cm_node);
	cm_node->cm_core->stats_connect_errs++;
	if (reset) {
		i40iw_debug(cm_node->dev,
			    I40IW_DEBUG_CM,
			    "%s cm_node=%p state=%d\n",
			    __func__,
			    cm_node,
			    cm_node->state);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
	}

	cm_node->state = I40IW_CM_STATE_CLOSED;
	i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
}
580 | ||
/**
 * i40iw_passive_open_err - handle passive side cm error
 * @cm_node: connection's node
 * @reset: send reset or just free cm_node
 *
 * Cleans up any pending retransmit entry, bumps the passive-error
 * counter and moves the node to CLOSED.  Either sends a RST to the
 * peer or, when no reset is wanted, drops the node reference.
 */
static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, bool reset)
{
	i40iw_cleanup_retrans_entry(cm_node);
	cm_node->cm_core->stats_passive_errs++;
	cm_node->state = I40IW_CM_STATE_CLOSED;
	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "%s cm_node=%p state =%d\n",
		    __func__,
		    cm_node,
		    cm_node->state);
	if (reset)
		i40iw_send_reset(cm_node);
	else
		i40iw_rem_ref_cm_node(cm_node);
}
602 | ||
/**
 * i40iw_event_connect_error - to create connect error event
 * @event: cm information for connect event
 *
 * Sends a CONNECT_REPLY upcall with -ECONNRESET, detaches the QP from
 * the cm_id and releases the cm_id and cm_node references.  Silently
 * returns if the node has no cm_id or no usable QP.
 */
static void i40iw_event_connect_error(struct i40iw_cm_event *event)
{
	struct i40iw_qp *iwqp;
	struct iw_cm_id *cm_id;

	cm_id = event->cm_node->cm_id;
	if (!cm_id)
		return;

	iwqp = cm_id->provider_data;

	if (!iwqp || !iwqp->iwdev)
		return;

	/* break the QP <-> cm_id association before the upcall */
	iwqp->cm_id = NULL;
	cm_id->provider_data = NULL;
	i40iw_send_cm_event(event->cm_node, cm_id,
			    IW_CM_EVENT_CONNECT_REPLY,
			    -ECONNRESET);
	cm_id->rem_ref(cm_id);
	i40iw_rem_ref_cm_node(event->cm_node);
}
629 | ||
630 | /** | |
631 | * i40iw_process_options | |
632 | * @cm_node: connection's node | |
633 | * @optionsloc: point to start of options | |
634 | * @optionsize: size of all options | |
635 | * @syn_packet: flag if syn packet | |
636 | */ | |
637 | static int i40iw_process_options(struct i40iw_cm_node *cm_node, | |
638 | u8 *optionsloc, | |
639 | u32 optionsize, | |
640 | u32 syn_packet) | |
641 | { | |
642 | u32 tmp; | |
643 | u32 offset = 0; | |
644 | union all_known_options *all_options; | |
645 | char got_mss_option = 0; | |
646 | ||
647 | while (offset < optionsize) { | |
648 | all_options = (union all_known_options *)(optionsloc + offset); | |
649 | switch (all_options->as_base.optionnum) { | |
650 | case OPTION_NUMBER_END: | |
651 | offset = optionsize; | |
652 | break; | |
653 | case OPTION_NUMBER_NONE: | |
654 | offset += 1; | |
655 | continue; | |
656 | case OPTION_NUMBER_MSS: | |
657 | i40iw_debug(cm_node->dev, | |
658 | I40IW_DEBUG_CM, | |
659 | "%s: MSS Length: %d Offset: %d Size: %d\n", | |
660 | __func__, | |
661 | all_options->as_mss.length, | |
662 | offset, | |
663 | optionsize); | |
664 | got_mss_option = 1; | |
665 | if (all_options->as_mss.length != 4) | |
666 | return -1; | |
667 | tmp = ntohs(all_options->as_mss.mss); | |
668 | if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss) | |
669 | cm_node->tcp_cntxt.mss = tmp; | |
670 | break; | |
671 | case OPTION_NUMBER_WINDOW_SCALE: | |
672 | cm_node->tcp_cntxt.snd_wscale = | |
673 | all_options->as_windowscale.shiftcount; | |
674 | break; | |
675 | default: | |
676 | i40iw_debug(cm_node->dev, | |
677 | I40IW_DEBUG_CM, | |
678 | "TCP Option not understood: %x\n", | |
679 | all_options->as_base.optionnum); | |
680 | break; | |
681 | } | |
682 | offset += all_options->as_base.length; | |
683 | } | |
684 | if (!got_mss_option && syn_packet) | |
685 | cm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS; | |
686 | return 0; | |
687 | } | |
688 | ||
/**
 * i40iw_handle_tcp_options - parse options and update send window
 * @cm_node: connection's node
 * @tcph: pointer tcp header
 * @optionsize: size of options rcvd
 * @passive: active or passive flag
 *
 * Processes any TCP options following @tcph; on a malformed option
 * list, raises the side-appropriate open error (with RST) and returns
 * -1.  Otherwise updates the scaled send window (and its observed
 * maximum) from the header and returns 0.
 */
static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,
				    struct tcphdr *tcph,
				    int optionsize,
				    int passive)
{
	/* options begin immediately after the fixed TCP header */
	u8 *optionsloc = (u8 *)&tcph[1];

	if (optionsize) {
		if (i40iw_process_options(cm_node,
					  optionsloc,
					  optionsize,
					  (u32)tcph->syn)) {
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "%s: Node %p, Sending RESET\n",
				    __func__,
				    cm_node);
			if (passive)
				i40iw_passive_open_err(cm_node, true);
			else
				i40iw_active_open_err(cm_node, true);
			return -1;
		}
	}

	cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
	    cm_node->tcp_cntxt.snd_wscale;

	if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
		cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
	return 0;
}
728 | ||
/**
 * i40iw_build_mpa_v1 - build a MPA V1 frame
 * @cm_node: connection's node
 * @start_addr: buffer start address for the frame
 * @mpa_key: MPA_KEY_REQUEST or MPA_KEY_REPLY (selects the key string)
 *
 * Fills in the MPA v1 header: key, CRC flag, revision and the private
 * data length from the node (in network byte order).
 */
static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,
			       void *start_addr,
			       u8 mpa_key)
{
	struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;

	switch (mpa_key) {
	case MPA_KEY_REQUEST:
		memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
		break;
	case MPA_KEY_REPLY:
		memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
		break;
	default:
		break;
	}
	mpa_frame->flags = IETF_MPA_FLAGS_CRC;
	mpa_frame->rev = cm_node->mpa_frame_rev;
	mpa_frame->priv_data_len = htons(cm_node->pdata.size);
}
754 | ||
/**
 * i40iw_build_mpa_v2 - build a MPA V2 frame
 * @cm_node: connection's node
 * @start_addr: buffer start address
 * @mpa_key: MPA_KEY_REQUEST or MPA_KEY_REPLY (selects RDMA0 operation)
 *
 * Builds on top of the v1 header, setting the V2 flag, extending the
 * private data length by the RTR message size, and encoding IRD/ORD
 * control words (peer-to-peer, zero-length FPDU, and the RDMA0
 * read/write operation) into the RTR message in network byte order.
 */
static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
			       void *start_addr,
			       u8 mpa_key)
{
	struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
	struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
	u16 ctrl_ird, ctrl_ord;

	/* initialize the upper 5 bytes of the frame */
	i40iw_build_mpa_v1(cm_node, start_addr, mpa_key);
	mpa_frame->flags |= IETF_MPA_V2_FLAG;
	mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);

	/* initialize RTR msg */
	if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
		ctrl_ird = IETF_NO_IRD_ORD;
		ctrl_ord = IETF_NO_IRD_ORD;
	} else {
		/* IETF_NO_IRD_ORD doubles as the largest encodable value */
		ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
			IETF_NO_IRD_ORD : cm_node->ird_size;
		ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
			IETF_NO_IRD_ORD : cm_node->ord_size;
	}

	ctrl_ird |= IETF_PEER_TO_PEER;
	ctrl_ird |= IETF_FLPDU_ZERO_LEN;

	switch (mpa_key) {
	case MPA_KEY_REQUEST:
		/* requester advertises both RDMA0 operations */
		ctrl_ord |= IETF_RDMA0_WRITE;
		ctrl_ord |= IETF_RDMA0_READ;
		break;
	case MPA_KEY_REPLY:
		/* responder selects the operation it will actually send */
		switch (cm_node->send_rdma0_op) {
		case SEND_RDMA_WRITE_ZERO:
			ctrl_ord |= IETF_RDMA0_WRITE;
			break;
		case SEND_RDMA_READ_ZERO:
			ctrl_ord |= IETF_RDMA0_READ;
			break;
		}
		break;
	default:
		break;
	}
	rtr_msg->ctrl_ird = htons(ctrl_ird);
	rtr_msg->ctrl_ord = htons(ctrl_ord);
}
809 | ||
810 | /** | |
811 | * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2 | |
812 | * @cm_node: connection's node | |
813 | * @mpa: mpa: data buffer | |
814 | * @mpa_key: to do read0 or write0 | |
815 | */ | |
816 | static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node, | |
817 | struct i40iw_kmem_info *mpa, | |
818 | u8 mpa_key) | |
819 | { | |
820 | int hdr_len = 0; | |
821 | ||
822 | switch (cm_node->mpa_frame_rev) { | |
823 | case IETF_MPA_V1: | |
824 | hdr_len = sizeof(struct ietf_mpa_v1); | |
825 | i40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key); | |
826 | break; | |
827 | case IETF_MPA_V2: | |
828 | hdr_len = sizeof(struct ietf_mpa_v2); | |
829 | i40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key); | |
830 | break; | |
831 | default: | |
832 | break; | |
833 | } | |
834 | ||
835 | return hdr_len; | |
836 | } | |
837 | ||
/**
 * i40iw_send_mpa_request - active node send mpa request to passive node
 * @cm_node: connection's node
 *
 * Builds the MPA request header into the node, forms an ACK frame
 * carrying the header plus private data, and schedules it for send
 * with retransmission.  Returns the scheduler's result, or -1 on any
 * failure to build the header or obtain a buffer.
 */
static int i40iw_send_mpa_request(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;

	if (!cm_node) {
		i40iw_pr_err("cm_node == NULL\n");
		return -1;
	}

	cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
	cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
							 &cm_node->mpa_hdr,
							 MPA_KEY_REQUEST);
	if (!cm_node->mpa_hdr.size) {
		/* zero size means an unknown MPA revision on the node */
		i40iw_pr_err("mpa size = %d\n", cm_node->mpa_hdr.size);
		return -1;
	}

	sqbuf = i40iw_form_cm_frame(cm_node,
				    NULL,
				    &cm_node->mpa_hdr,
				    &cm_node->pdata,
				    SET_ACK);
	if (!sqbuf) {
		i40iw_pr_err("sq_buf == NULL\n");
		return -1;
	}
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}
871 | ||
/**
 * i40iw_send_mpa_reject - send an MPA reply with the reject flag set
 * @cm_node: connection's node
 * @pdata: reject data for connection
 * @plen: length of reject data
 *
 * Builds the MPA reply header with IETF_MPA_FLAGS_REJECT, sends it as
 * ACK|FIN with the caller's private data, and moves the node into
 * FIN_WAIT1.  Returns the timer scheduler's result or -ENOMEM when no
 * buffer is available.
 */
static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
				 const void *pdata,
				 u8 plen)
{
	struct i40iw_puda_buf *sqbuf;
	struct i40iw_kmem_info priv_info;

	cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
	cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
							 &cm_node->mpa_hdr,
							 MPA_KEY_REPLY);

	/* mark the reply as a rejection before it is copied into the frame */
	cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
	priv_info.addr = (void *)pdata;
	priv_info.size = plen;

	sqbuf = i40iw_form_cm_frame(cm_node,
				    NULL,
				    &cm_node->mpa_hdr,
				    &priv_info,
				    SET_ACK | SET_FIN);
	if (!sqbuf) {
		i40iw_pr_err("no sqbuf\n");
		return -ENOMEM;
	}
	cm_node->state = I40IW_CM_STATE_FIN_WAIT1;
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}
906 | ||
907 | /** | |
908 | * recv_mpa - process an IETF MPA frame | |
909 | * @cm_node: connection's node | |
910 | * @buffer: Data pointer | |
911 | * @type: to return accept or reject | |
912 | * @len: Len of mpa buffer | |
913 | */ | |
914 | static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len) | |
915 | { | |
916 | struct ietf_mpa_v1 *mpa_frame; | |
917 | struct ietf_mpa_v2 *mpa_v2_frame; | |
918 | struct ietf_rtr_msg *rtr_msg; | |
919 | int mpa_hdr_len; | |
920 | int priv_data_len; | |
921 | ||
922 | *type = I40IW_MPA_REQUEST_ACCEPT; | |
923 | ||
924 | if (len < sizeof(struct ietf_mpa_v1)) { | |
925 | i40iw_pr_err("ietf buffer small (%x)\n", len); | |
926 | return -1; | |
927 | } | |
928 | ||
929 | mpa_frame = (struct ietf_mpa_v1 *)buffer; | |
930 | mpa_hdr_len = sizeof(struct ietf_mpa_v1); | |
931 | priv_data_len = ntohs(mpa_frame->priv_data_len); | |
932 | ||
933 | if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) { | |
934 | i40iw_pr_err("large pri_data %d\n", priv_data_len); | |
935 | return -1; | |
936 | } | |
937 | if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) { | |
938 | i40iw_pr_err("unsupported mpa rev = %d\n", mpa_frame->rev); | |
939 | return -1; | |
940 | } | |
941 | if (mpa_frame->rev > cm_node->mpa_frame_rev) { | |
942 | i40iw_pr_err("rev %d\n", mpa_frame->rev); | |
943 | return -1; | |
944 | } | |
945 | cm_node->mpa_frame_rev = mpa_frame->rev; | |
946 | ||
947 | if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) { | |
948 | if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) { | |
949 | i40iw_pr_err("Unexpected MPA Key received\n"); | |
950 | return -1; | |
951 | } | |
952 | } else { | |
953 | if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) { | |
954 | i40iw_pr_err("Unexpected MPA Key received\n"); | |
955 | return -1; | |
956 | } | |
957 | } | |
958 | ||
959 | if (priv_data_len + mpa_hdr_len > len) { | |
960 | i40iw_pr_err("ietf buffer len(%x + %x != %x)\n", | |
961 | priv_data_len, mpa_hdr_len, len); | |
962 | return -1; | |
963 | } | |
964 | if (len > MAX_CM_BUFFER) { | |
965 | i40iw_pr_err("ietf buffer large len = %d\n", len); | |
966 | return -1; | |
967 | } | |
968 | ||
969 | switch (mpa_frame->rev) { | |
970 | case IETF_MPA_V2:{ | |
971 | u16 ird_size; | |
972 | u16 ord_size; | |
973 | u16 ctrl_ord; | |
974 | u16 ctrl_ird; | |
975 | ||
976 | mpa_v2_frame = (struct ietf_mpa_v2 *)buffer; | |
977 | mpa_hdr_len += IETF_RTR_MSG_SIZE; | |
978 | rtr_msg = &mpa_v2_frame->rtr_msg; | |
979 | ||
980 | /* parse rtr message */ | |
981 | ctrl_ord = ntohs(rtr_msg->ctrl_ord); | |
982 | ctrl_ird = ntohs(rtr_msg->ctrl_ird); | |
983 | ird_size = ctrl_ird & IETF_NO_IRD_ORD; | |
984 | ord_size = ctrl_ord & IETF_NO_IRD_ORD; | |
985 | ||
986 | if (!(ctrl_ird & IETF_PEER_TO_PEER)) | |
987 | return -1; | |
988 | ||
989 | if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) { | |
990 | cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD; | |
991 | goto negotiate_done; | |
992 | } | |
993 | ||
994 | if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) { | |
995 | /* responder */ | |
996 | if (!ord_size && (ctrl_ord & IETF_RDMA0_READ)) | |
997 | cm_node->ird_size = 1; | |
998 | if (cm_node->ord_size > ird_size) | |
999 | cm_node->ord_size = ird_size; | |
1000 | } else { | |
1001 | /* initiator */ | |
1002 | if (!ird_size && (ctrl_ord & IETF_RDMA0_READ)) | |
1003 | return -1; | |
1004 | if (cm_node->ord_size > ird_size) | |
1005 | cm_node->ord_size = ird_size; | |
1006 | ||
1007 | if (cm_node->ird_size < ord_size) | |
1008 | /* no resources available */ | |
1009 | return -1; | |
1010 | } | |
1011 | ||
1012 | negotiate_done: | |
1013 | if (ctrl_ord & IETF_RDMA0_READ) | |
1014 | cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; | |
1015 | else if (ctrl_ord & IETF_RDMA0_WRITE) | |
1016 | cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO; | |
1017 | else /* Not supported RDMA0 operation */ | |
1018 | return -1; | |
1019 | i40iw_debug(cm_node->dev, I40IW_DEBUG_CM, | |
1020 | "MPAV2: Negotiated ORD: %d, IRD: %d\n", | |
1021 | cm_node->ord_size, cm_node->ird_size); | |
1022 | break; | |
1023 | } | |
1024 | break; | |
1025 | case IETF_MPA_V1: | |
1026 | default: | |
1027 | break; | |
1028 | } | |
1029 | ||
1030 | memcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len); | |
1031 | cm_node->pdata.size = priv_data_len; | |
1032 | ||
1033 | if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT) | |
1034 | *type = I40IW_MPA_REQUEST_REJECT; | |
1035 | ||
1036 | if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS) | |
1037 | cm_node->snd_mark_en = true; | |
1038 | ||
1039 | return 0; | |
1040 | } | |
1041 | ||
/**
 * i40iw_schedule_cm_timer - queue a send/retransmit or close timer entry
 * @cm_node: connection's node
 * @sqbuf: buffer to send
 * @type: if it is send or close
 * @send_retrans: if rexmits to be done
 * @close_when_complete: is cm_node to be removed
 *
 * note - cm_node needs to be protected before calling this. Encase in:
 *		i40iw_rem_ref_cm_node(cm_core, cm_node);
 *		i40iw_schedule_cm_timer(...)
 *		atomic_inc(&cm_node->ref_count);
 */
int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
			    struct i40iw_puda_buf *sqbuf,
			    enum i40iw_timer_type type,
			    int send_retrans,
			    int close_when_complete)
{
	struct i40iw_sc_dev *dev = cm_node->dev;
	struct i40iw_cm_core *cm_core = cm_node->cm_core;
	struct i40iw_timer_entry *new_send;
	int ret = 0;
	u32 was_timer_set;
	unsigned long flags;

	/* On allocation failure the sqbuf is owned here and must be freed. */
	new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
	if (!new_send) {
		i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
		return -ENOMEM;
	}
	new_send->retrycount = I40IW_DEFAULT_RETRYS;
	new_send->retranscount = I40IW_DEFAULT_RETRANS;
	new_send->sqbuf = sqbuf;
	new_send->timetosend = jiffies;
	new_send->type = type;
	new_send->send_retrans = send_retrans;
	new_send->close_when_complete = close_when_complete;

	if (type == I40IW_TIMER_TYPE_CLOSE) {
		/* Close entries fire a short delay (100ms) from now; only one
		 * close entry may be pending per node.
		 */
		new_send->timetosend += (HZ / 10);
		if (cm_node->close_entry) {
			kfree(new_send);
			i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
			i40iw_pr_err("already close entry\n");
			return -EINVAL;
		}
		cm_node->close_entry = new_send;
	}

	if (type == I40IW_TIMER_TYPE_SEND) {
		/* Install the entry and take a node reference for the timer
		 * before transmitting; the sqbuf refcount is bumped so the
		 * buffer survives until the send completes.
		 */
		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
		cm_node->send_entry = new_send;
		atomic_inc(&cm_node->ref_count);
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
		new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;

		atomic_inc(&sqbuf->refcount);
		i40iw_puda_send_buf(dev->ilq, sqbuf);
		if (!send_retrans) {
			/* Fire-and-forget: no retransmits wanted, tear the
			 * entry back down immediately.
			 */
			i40iw_cleanup_retrans_entry(cm_node);
			if (close_when_complete)
				i40iw_rem_ref_cm_node(cm_node);
			return ret;
		}
	}

	/* Arm the core TCP timer if it is not already pending. */
	spin_lock_irqsave(&cm_core->ht_lock, flags);
	was_timer_set = timer_pending(&cm_core->tcp_timer);

	if (!was_timer_set) {
		cm_core->tcp_timer.expires = new_send->timetosend;
		add_timer(&cm_core->tcp_timer);
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	return ret;
}
1120 | ||
1121 | /** | |
1122 | * i40iw_retrans_expired - Could not rexmit the packet | |
1123 | * @cm_node: connection's node | |
1124 | */ | |
1125 | static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node) | |
1126 | { | |
1127 | struct iw_cm_id *cm_id = cm_node->cm_id; | |
1128 | enum i40iw_cm_node_state state = cm_node->state; | |
1129 | ||
1130 | cm_node->state = I40IW_CM_STATE_CLOSED; | |
1131 | switch (state) { | |
1132 | case I40IW_CM_STATE_SYN_RCVD: | |
1133 | case I40IW_CM_STATE_CLOSING: | |
1134 | i40iw_rem_ref_cm_node(cm_node); | |
1135 | break; | |
1136 | case I40IW_CM_STATE_FIN_WAIT1: | |
1137 | case I40IW_CM_STATE_LAST_ACK: | |
1138 | if (cm_node->cm_id) | |
1139 | cm_id->rem_ref(cm_id); | |
1140 | i40iw_send_reset(cm_node); | |
1141 | break; | |
1142 | default: | |
1143 | atomic_inc(&cm_node->ref_count); | |
1144 | i40iw_send_reset(cm_node); | |
1145 | i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED); | |
1146 | break; | |
1147 | } | |
1148 | } | |
1149 | ||
/**
 * i40iw_handle_close_entry - for handling retry/timeouts
 * @cm_node: connection's node
 * @rem_node: flag for remove cm_node
 *
 * Consumes and frees the node's pending close_entry.  If the entry
 * carries a QP pointer, the QP is driven into an error/closed state
 * and a disconnect is issued; otherwise (e.g. TIME_WAIT) the node
 * reference is optionally dropped.
 */
static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node)
{
	struct i40iw_timer_entry *close_entry = cm_node->close_entry;
	struct iw_cm_id *cm_id = cm_node->cm_id;
	struct i40iw_qp *iwqp;
	unsigned long flags;

	if (!close_entry)
		return;
	/* NOTE(review): the sqbuf field is overloaded here to carry a
	 * struct i40iw_qp pointer for close-type timer entries — confirm
	 * against the code paths that set close_entry.
	 */
	iwqp = (struct i40iw_qp *)close_entry->sqbuf;
	if (iwqp) {
		spin_lock_irqsave(&iwqp->lock, flags);
		if (iwqp->cm_id) {
			/* Force the QP into error state and kick off the
			 * disconnect flow (outside the QP lock).
			 */
			iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
			iwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR;
			iwqp->last_aeq = I40IW_AE_RESET_SENT;
			iwqp->ibqp_state = IB_QPS_ERR;
			spin_unlock_irqrestore(&iwqp->lock, flags);
			i40iw_cm_disconn(iwqp);
		} else {
			spin_unlock_irqrestore(&iwqp->lock, flags);
		}
	} else if (rem_node) {
		/* TIME_WAIT state */
		i40iw_rem_ref_cm_node(cm_node);
	}
	if (cm_id)
		cm_id->rem_ref(cm_id);
	kfree(close_entry);
	cm_node->close_entry = NULL;
}
1186 | ||
/**
 * i40iw_cm_timer_tick - system's timer expired callback
 * @pass: Pointing to cm_core
 *
 * Walks all connected nodes with pending close/send timer entries,
 * handles expirations (close handling, retransmission, or giving up),
 * and re-arms the core TCP timer for the earliest future deadline.
 */
static void i40iw_cm_timer_tick(unsigned long pass)
{
	unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
	struct i40iw_cm_node *cm_node;
	struct i40iw_timer_entry *send_entry, *close_entry;
	struct list_head *list_core_temp;
	struct list_head *list_node;
	struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
	u32 settimer = 0;
	unsigned long timetosend;
	struct i40iw_sc_dev *dev;
	unsigned long flags;

	struct list_head timer_list;

	INIT_LIST_HEAD(&timer_list);
	spin_lock_irqsave(&cm_core->ht_lock, flags);

	/* Phase 1: under ht_lock, collect the nodes with pending timer
	 * entries onto a private list, taking a reference on each so they
	 * stay alive while we process them without the lock held.
	 */
	list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
		cm_node = container_of(list_node, struct i40iw_cm_node, list);
		if (cm_node->close_entry || cm_node->send_entry) {
			atomic_inc(&cm_node->ref_count);
			list_add(&cm_node->timer_entry, &timer_list);
		}
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	/* Phase 2: process each collected node. */
	list_for_each_safe(list_node, list_core_temp, &timer_list) {
		cm_node = container_of(list_node,
				       struct i40iw_cm_node,
				       timer_entry);
		close_entry = cm_node->close_entry;

		if (close_entry) {
			if (time_after(close_entry->timetosend, jiffies)) {
				/* Not yet due: track earliest deadline. */
				if (nexttimeout > close_entry->timetosend ||
				    !settimer) {
					nexttimeout = close_entry->timetosend;
					settimer = 1;
				}
			} else {
				i40iw_handle_close_entry(cm_node, 1);
			}
		}

		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);

		send_entry = cm_node->send_entry;
		if (!send_entry)
			goto done;
		if (time_after(send_entry->timetosend, jiffies)) {
			/* Not yet due; but an offloaded connection no longer
			 * needs software retransmits — free the entry.
			 */
			if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
				if ((nexttimeout > send_entry->timetosend) ||
				    !settimer) {
					nexttimeout = send_entry->timetosend;
					settimer = 1;
				}
			} else {
				i40iw_free_retrans_entry(cm_node);
			}
			goto done;
		}

		if ((cm_node->state == I40IW_CM_STATE_OFFLOADED) ||
		    (cm_node->state == I40IW_CM_STATE_CLOSED)) {
			i40iw_free_retrans_entry(cm_node);
			goto done;
		}

		if (!send_entry->retranscount || !send_entry->retrycount) {
			/* Out of retransmit budget: give up on this node.
			 * i40iw_retrans_expired() must run unlocked.
			 */
			i40iw_free_retrans_entry(cm_node);

			spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
			i40iw_retrans_expired(cm_node);
			cm_node->state = I40IW_CM_STATE_CLOSED;
			spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
			goto done;
		}
		cm_node->cm_core->stats_pkt_retrans++;
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);

		/* Retransmit: bump the buffer refcount so it outlives the
		 * send, then transmit without holding the retrans lock.
		 */
		dev = cm_node->dev;
		atomic_inc(&send_entry->sqbuf->refcount);
		i40iw_puda_send_buf(dev->ilq, send_entry->sqbuf);
		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
		if (send_entry->send_retrans) {
			/* Exponential backoff, capped at I40IW_MAX_TIMEOUT. */
			send_entry->retranscount--;
			timetosend = (I40IW_RETRY_TIMEOUT <<
				      (I40IW_DEFAULT_RETRANS -
				       send_entry->retranscount));

			send_entry->timetosend = jiffies +
			    min(timetosend, I40IW_MAX_TIMEOUT);
			if (nexttimeout > send_entry->timetosend || !settimer) {
				nexttimeout = send_entry->timetosend;
				settimer = 1;
			}
		} else {
			int close_when_complete;

			close_when_complete = send_entry->close_when_complete;
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "cm_node=%p state=%d\n",
				    cm_node,
				    cm_node->state);
			i40iw_free_retrans_entry(cm_node);
			if (close_when_complete)
				i40iw_rem_ref_cm_node(cm_node);
		}
done:
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
		/* Drop the reference taken in phase 1. */
		i40iw_rem_ref_cm_node(cm_node);
	}

	/* Re-arm the core timer for the earliest pending deadline. */
	if (settimer) {
		spin_lock_irqsave(&cm_core->ht_lock, flags);
		if (!timer_pending(&cm_core->tcp_timer)) {
			cm_core->tcp_timer.expires = nexttimeout;
			add_timer(&cm_core->tcp_timer);
		}
		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
	}
}
1315 | ||
1316 | /** | |
1317 | * i40iw_send_syn - send SYN packet | |
1318 | * @cm_node: connection's node | |
1319 | * @sendack: flag to set ACK bit or not | |
1320 | */ | |
1321 | int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack) | |
1322 | { | |
1323 | struct i40iw_puda_buf *sqbuf; | |
1324 | int flags = SET_SYN; | |
1325 | char optionsbuffer[sizeof(struct option_mss) + | |
1326 | sizeof(struct option_windowscale) + | |
1327 | sizeof(struct option_base) + TCP_OPTIONS_PADDING]; | |
1328 | struct i40iw_kmem_info opts; | |
1329 | ||
1330 | int optionssize = 0; | |
1331 | /* Sending MSS option */ | |
1332 | union all_known_options *options; | |
1333 | ||
1334 | opts.addr = optionsbuffer; | |
1335 | if (!cm_node) { | |
1336 | i40iw_pr_err("no cm_node\n"); | |
1337 | return -EINVAL; | |
1338 | } | |
1339 | ||
1340 | options = (union all_known_options *)&optionsbuffer[optionssize]; | |
1341 | options->as_mss.optionnum = OPTION_NUMBER_MSS; | |
1342 | options->as_mss.length = sizeof(struct option_mss); | |
1343 | options->as_mss.mss = htons(cm_node->tcp_cntxt.mss); | |
1344 | optionssize += sizeof(struct option_mss); | |
1345 | ||
1346 | options = (union all_known_options *)&optionsbuffer[optionssize]; | |
1347 | options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE; | |
1348 | options->as_windowscale.length = sizeof(struct option_windowscale); | |
1349 | options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale; | |
1350 | optionssize += sizeof(struct option_windowscale); | |
1351 | options = (union all_known_options *)&optionsbuffer[optionssize]; | |
1352 | options->as_end = OPTION_NUMBER_END; | |
1353 | optionssize += 1; | |
1354 | ||
1355 | if (sendack) | |
1356 | flags |= SET_ACK; | |
1357 | ||
1358 | opts.size = optionssize; | |
1359 | ||
1360 | sqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags); | |
1361 | if (!sqbuf) { | |
1362 | i40iw_pr_err("no sqbuf\n"); | |
1363 | return -1; | |
1364 | } | |
1365 | return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0); | |
1366 | } | |
1367 | ||
1368 | /** | |
1369 | * i40iw_send_ack - Send ACK packet | |
1370 | * @cm_node: connection's node | |
1371 | */ | |
1372 | static void i40iw_send_ack(struct i40iw_cm_node *cm_node) | |
1373 | { | |
1374 | struct i40iw_puda_buf *sqbuf; | |
1375 | ||
1376 | sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK); | |
1377 | if (sqbuf) | |
1378 | i40iw_puda_send_buf(cm_node->dev->ilq, sqbuf); | |
1379 | else | |
1380 | i40iw_pr_err("no sqbuf\n"); | |
1381 | } | |
1382 | ||
1383 | /** | |
1384 | * i40iw_send_fin - Send FIN pkt | |
1385 | * @cm_node: connection's node | |
1386 | */ | |
1387 | static int i40iw_send_fin(struct i40iw_cm_node *cm_node) | |
1388 | { | |
1389 | struct i40iw_puda_buf *sqbuf; | |
1390 | ||
1391 | sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN); | |
1392 | if (!sqbuf) { | |
1393 | i40iw_pr_err("no sqbuf\n"); | |
1394 | return -1; | |
1395 | } | |
1396 | return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0); | |
1397 | } | |
1398 | ||
1399 | /** | |
1400 | * i40iw_find_node - find a cm node that matches the reference cm node | |
1401 | * @cm_core: cm's core | |
1402 | * @rem_port: remote tcp port num | |
1403 | * @rem_addr: remote ip addr | |
1404 | * @loc_port: local tcp port num | |
1405 | * @loc_addr: loc ip addr | |
1406 | * @add_refcnt: flag to increment refcount of cm_node | |
1407 | */ | |
1408 | struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core, | |
1409 | u16 rem_port, | |
1410 | u32 *rem_addr, | |
1411 | u16 loc_port, | |
1412 | u32 *loc_addr, | |
1413 | bool add_refcnt) | |
1414 | { | |
1415 | struct list_head *hte; | |
1416 | struct i40iw_cm_node *cm_node; | |
1417 | unsigned long flags; | |
1418 | ||
1419 | hte = &cm_core->connected_nodes; | |
1420 | ||
1421 | /* walk list and find cm_node associated with this session ID */ | |
1422 | spin_lock_irqsave(&cm_core->ht_lock, flags); | |
1423 | list_for_each_entry(cm_node, hte, list) { | |
8d8cd0bf FL |
1424 | if (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) && |
1425 | (cm_node->loc_port == loc_port) && | |
1426 | !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) && | |
1427 | (cm_node->rem_port == rem_port)) { | |
f27b4746 FL |
1428 | if (add_refcnt) |
1429 | atomic_inc(&cm_node->ref_count); | |
1430 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | |
1431 | return cm_node; | |
1432 | } | |
1433 | } | |
1434 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | |
1435 | ||
1436 | /* no owner node */ | |
1437 | return NULL; | |
1438 | } | |
1439 | ||
1440 | /** | |
1441 | * i40iw_find_listener - find a cm node listening on this addr-port pair | |
1442 | * @cm_core: cm's core | |
1443 | * @dst_port: listener tcp port num | |
1444 | * @dst_addr: listener ip addr | |
1445 | * @listener_state: state to match with listen node's | |
1446 | */ | |
1447 | static struct i40iw_cm_listener *i40iw_find_listener( | |
1448 | struct i40iw_cm_core *cm_core, | |
1449 | u32 *dst_addr, | |
1450 | u16 dst_port, | |
1451 | u16 vlan_id, | |
1452 | enum i40iw_cm_listener_state | |
8d8cd0bf | 1453 | listener_state) |
f27b4746 FL |
1454 | { |
1455 | struct i40iw_cm_listener *listen_node; | |
1456 | static const u32 ip_zero[4] = { 0, 0, 0, 0 }; | |
1457 | u32 listen_addr[4]; | |
1458 | u16 listen_port; | |
1459 | unsigned long flags; | |
1460 | ||
1461 | /* walk list and find cm_node associated with this session ID */ | |
1462 | spin_lock_irqsave(&cm_core->listen_list_lock, flags); | |
1463 | list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { | |
8d8cd0bf FL |
1464 | memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr)); |
1465 | listen_port = listen_node->loc_port; | |
f27b4746 FL |
1466 | /* compare node pair, return node handle if a match */ |
1467 | if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) || | |
1468 | !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) && | |
1469 | (listen_port == dst_port) && | |
1470 | (listener_state & listen_node->listener_state)) { | |
1471 | atomic_inc(&listen_node->ref_count); | |
1472 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | |
1473 | return listen_node; | |
1474 | } | |
1475 | } | |
1476 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | |
1477 | return NULL; | |
1478 | } | |
1479 | ||
1480 | /** | |
1481 | * i40iw_add_hte_node - add a cm node to the hash table | |
1482 | * @cm_core: cm's core | |
1483 | * @cm_node: connection's node | |
1484 | */ | |
1485 | static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core, | |
1486 | struct i40iw_cm_node *cm_node) | |
1487 | { | |
1488 | struct list_head *hte; | |
1489 | unsigned long flags; | |
1490 | ||
1491 | if (!cm_node || !cm_core) { | |
1492 | i40iw_pr_err("cm_node or cm_core == NULL\n"); | |
1493 | return; | |
1494 | } | |
1495 | spin_lock_irqsave(&cm_core->ht_lock, flags); | |
1496 | ||
1497 | /* get a handle on the hash table element (list head for this slot) */ | |
1498 | hte = &cm_core->connected_nodes; | |
1499 | list_add_tail(&cm_node->list, hte); | |
1500 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | |
1501 | } | |
1502 | ||
1503 | /** | |
1504 | * listen_port_in_use - determine if port is in use | |
1505 | * @port: Listen port number | |
1506 | */ | |
1507 | static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port) | |
1508 | { | |
1509 | struct i40iw_cm_listener *listen_node; | |
1510 | unsigned long flags; | |
1511 | bool ret = false; | |
1512 | ||
1513 | spin_lock_irqsave(&cm_core->listen_list_lock, flags); | |
1514 | list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { | |
8d8cd0bf | 1515 | if (listen_node->loc_port == port) { |
f27b4746 FL |
1516 | ret = true; |
1517 | break; | |
1518 | } | |
1519 | } | |
1520 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | |
1521 | return ret; | |
1522 | } | |
1523 | ||
/**
 * i40iw_del_multiple_qhash - Remove qhash and child listens
 * @iwdev: iWarp device
 * @cm_info: CM info for parent listen node
 * @cm_parent_listen_node: The parent listen node
 *
 * Tears down every child listen node of the parent: removes its qhash
 * filter (if one was set), unlinks it and frees it.  Returns the status
 * of the last qhash operation (I40IW_ERR_CONFIG if the list was empty).
 */
static enum i40iw_status_code i40iw_del_multiple_qhash(
	struct i40iw_device *iwdev,
	struct i40iw_cm_info *cm_info,
	struct i40iw_cm_listener *cm_parent_listen_node)
{
	struct i40iw_cm_listener *child_listen_node;
	enum i40iw_status_code ret = I40IW_ERR_CONFIG;
	struct list_head *pos, *tpos;
	unsigned long flags;

	spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
	list_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) {
		child_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list);
		if (child_listen_node->ipv4)
			i40iw_debug(&iwdev->sc_dev,
				    I40IW_DEBUG_CM,
				    "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
				    child_listen_node->loc_addr,
				    child_listen_node->loc_port,
				    child_listen_node->vlan_id);
		else
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
				    "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
				    child_listen_node->loc_addr,
				    child_listen_node->loc_port,
				    child_listen_node->vlan_id);
		list_del(pos);
		/* Point cm_info at this child's address/vlan for the
		 * qhash delete request below.
		 */
		memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
		       sizeof(cm_info->loc_addr));
		cm_info->vlan_id = child_listen_node->vlan_id;
		if (child_listen_node->qhash_set) {
			ret = i40iw_manage_qhash(iwdev, cm_info,
						 I40IW_QHASH_TYPE_TCP_SYN,
						 I40IW_QHASH_MANAGE_TYPE_DELETE,
						 NULL, false);
			child_listen_node->qhash_set = false;
		} else {
			ret = I40IW_SUCCESS;
		}
		i40iw_debug(&iwdev->sc_dev,
			    I40IW_DEBUG_CM,
			    "freed pointer = %p\n",
			    child_listen_node);
		kfree(child_listen_node);
		cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
	}
	spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);

	return ret;
}
1580 | ||
/**
 * i40iw_netdev_vlan_ipv6 - Gets the netdev and mac
 * @addr: local IPv6 address
 * @vlan_id: vlan id for the given IPv6 address
 * @mac: mac address for the given IPv6 address
 *
 * Returns the net_device of the IPv6 address and also sets the
 * vlan id and mac for that address.
 */
static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
{
	struct net_device *ip_dev = NULL;
	struct in6_addr laddr6;

	/* Without IPv6 support there is nothing to look up. */
	if (!IS_ENABLED(CONFIG_IPV6))
		return NULL;
	i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
	/* Defaults in case no matching device is found. */
	if (vlan_id)
		*vlan_id = I40IW_NO_VLAN;
	if (mac)
		eth_zero_addr(mac);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
			if (vlan_id)
				*vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
			if (ip_dev->dev_addr && mac)
				ether_addr_copy(mac, ip_dev->dev_addr);
			break;
		}
	}
	rcu_read_unlock();
	/* NOTE(review): ip_dev is returned after rcu_read_unlock() without a
	 * reference taken — callers appear to treat it as a short-lived
	 * lookup result; confirm lifetime expectations.
	 */
	return ip_dev;
}
1615 | ||
1616 | /** | |
1617 | * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address | |
1618 | * @addr: local IPv4 address | |
1619 | */ | |
1620 | static u16 i40iw_get_vlan_ipv4(u32 *addr) | |
1621 | { | |
1622 | struct net_device *netdev; | |
1623 | u16 vlan_id = I40IW_NO_VLAN; | |
1624 | ||
1625 | netdev = ip_dev_find(&init_net, htonl(addr[0])); | |
1626 | if (netdev) { | |
1627 | vlan_id = rdma_vlan_dev_vlan_id(netdev); | |
1628 | dev_put(netdev); | |
1629 | } | |
1630 | return vlan_id; | |
1631 | } | |
1632 | ||
/**
 * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6
 * @iwdev: iWarp device
 * @cm_info: CM info for parent listen node
 * @cm_parent_listen_node: The parent listen node
 *
 * Adds a qhash and a child listen node for every IPv6 address
 * on the adapter and adds the associated qhash filter
 */
static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
					      struct i40iw_cm_info *cm_info,
					      struct i40iw_cm_listener *cm_parent_listen_node)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	enum i40iw_status_code ret = 0;
	struct i40iw_cm_listener *child_listen_node;
	unsigned long flags;

	rtnl_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		/* Only our own netdev or a VLAN stacked on it, and only
		 * when the interface is up.
		 */
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("idev == NULL\n");
				break;
			}
			/* One child listener + qhash per IPv6 address. */
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "IP=%pI6, vlan_id=%d, MAC=%pM\n",
					    &ifp->addr,
					    rdma_vlan_dev_vlan_id(ip_dev),
					    ip_dev->dev_addr);
				child_listen_node =
					kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "Allocating child listener %p\n",
					    child_listen_node);
				if (!child_listen_node) {
					i40iw_pr_err("listener memory allocation\n");
					ret = I40IW_ERR_NO_MEMORY;
					goto exit;
				}
				cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
				cm_parent_listen_node->vlan_id = cm_info->vlan_id;

				/* Child starts as a copy of the parent, then
				 * gets its own local address.
				 */
				memcpy(child_listen_node, cm_parent_listen_node,
				       sizeof(*child_listen_node));

				i40iw_copy_ip_ntohl(child_listen_node->loc_addr,
						    ifp->addr.in6_u.u6_addr32);
				memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
				       sizeof(cm_info->loc_addr));

				ret = i40iw_manage_qhash(iwdev, cm_info,
							 I40IW_QHASH_TYPE_TCP_SYN,
							 I40IW_QHASH_MANAGE_TYPE_ADD,
							 NULL, true);
				if (!ret) {
					child_listen_node->qhash_set = true;
					spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
					list_add(&child_listen_node->child_listen_list,
						 &cm_parent_listen_node->child_listen_list);
					spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
					cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
				} else {
					kfree(child_listen_node);
				}
			}
		}
	}
exit:
	rtnl_unlock();
	return ret;
}
1713 | ||
1714 | /** | |
1715 | * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4 | |
1716 | * @iwdev: iWarp device | |
1717 | * @cm_info: CM info for parent listen node | |
1718 | * @cm_parent_listen_node: The parent listen node | |
1719 | * | |
1720 | * Adds a qhash and a child listen node for every IPv4 address | |
1721 | * on the adapter and adds the associated qhash filter | |
1722 | */ | |
1723 | static enum i40iw_status_code i40iw_add_mqh_4( | |
1724 | struct i40iw_device *iwdev, | |
1725 | struct i40iw_cm_info *cm_info, | |
1726 | struct i40iw_cm_listener *cm_parent_listen_node) | |
1727 | { | |
1728 | struct net_device *dev; | |
1729 | struct in_device *idev; | |
1730 | struct i40iw_cm_listener *child_listen_node; | |
1731 | enum i40iw_status_code ret = 0; | |
1732 | unsigned long flags; | |
1733 | ||
1734 | rtnl_lock(); | |
1735 | for_each_netdev(&init_net, dev) { | |
1736 | if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) && | |
1737 | (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) || | |
1738 | (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) { | |
1739 | idev = in_dev_get(dev); | |
1740 | for_ifa(idev) { | |
1741 | i40iw_debug(&iwdev->sc_dev, | |
1742 | I40IW_DEBUG_CM, | |
1743 | "Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n", | |
1744 | &ifa->ifa_address, | |
1745 | rdma_vlan_dev_vlan_id(dev), | |
1746 | dev->dev_addr); | |
1747 | child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC); | |
1748 | cm_parent_listen_node->cm_core->stats_listen_nodes_created++; | |
1749 | i40iw_debug(&iwdev->sc_dev, | |
1750 | I40IW_DEBUG_CM, | |
1751 | "Allocating child listener %p\n", | |
1752 | child_listen_node); | |
1753 | if (!child_listen_node) { | |
1754 | i40iw_pr_err("listener memory allocation\n"); | |
1755 | in_dev_put(idev); | |
1756 | ret = I40IW_ERR_NO_MEMORY; | |
1757 | goto exit; | |
1758 | } | |
1759 | cm_info->vlan_id = rdma_vlan_dev_vlan_id(dev); | |
1760 | cm_parent_listen_node->vlan_id = cm_info->vlan_id; | |
1761 | memcpy(child_listen_node, | |
1762 | cm_parent_listen_node, | |
1763 | sizeof(*child_listen_node)); | |
1764 | ||
1765 | child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address); | |
f27b4746 FL |
1766 | memcpy(cm_info->loc_addr, child_listen_node->loc_addr, |
1767 | sizeof(cm_info->loc_addr)); | |
1768 | ||
1769 | ret = i40iw_manage_qhash(iwdev, | |
1770 | cm_info, | |
1771 | I40IW_QHASH_TYPE_TCP_SYN, | |
1772 | I40IW_QHASH_MANAGE_TYPE_ADD, | |
1773 | NULL, | |
1774 | true); | |
1775 | if (!ret) { | |
e5e74b61 | 1776 | child_listen_node->qhash_set = true; |
f27b4746 FL |
1777 | spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags); |
1778 | list_add(&child_listen_node->child_listen_list, | |
1779 | &cm_parent_listen_node->child_listen_list); | |
1780 | spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags); | |
1781 | } else { | |
1782 | kfree(child_listen_node); | |
1783 | cm_parent_listen_node->cm_core->stats_listen_nodes_created--; | |
1784 | } | |
1785 | } | |
1786 | endfor_ifa(idev); | |
1787 | in_dev_put(idev); | |
1788 | } | |
1789 | } | |
1790 | exit: | |
1791 | rtnl_unlock(); | |
1792 | return ret; | |
1793 | } | |
1794 | ||
/**
 * i40iw_dec_refcnt_listen - delete listener and associated cm nodes
 * @cm_core: cm's core
 * @listener: listener whose reference is being dropped
 * @free_hanging_nodes: to free associated cm_nodes
 * @apbvt_del: flag to delete the apbvt
 *
 * Drops one reference on @listener.  When the count reaches zero the
 * listener is unlinked, its qhash/apbvt resources are torn down and it
 * is freed.  Returns 0 when the listener was destroyed, -EINVAL when
 * references remain.
 */
static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
				   struct i40iw_cm_listener *listener,
				   int free_hanging_nodes, bool apbvt_del)
{
	int ret = -EINVAL;
	int err = 0;
	struct list_head *list_pos;
	struct list_head *list_temp;
	struct i40iw_cm_node *cm_node;
	struct list_head reset_list;
	struct i40iw_cm_info nfo;
	struct i40iw_cm_node *loopback;
	enum i40iw_cm_node_state old_state;
	unsigned long flags;

	/* free non-accelerated child nodes for this listener */
	INIT_LIST_HEAD(&reset_list);
	if (free_hanging_nodes) {
		/* Collect matching nodes under the lock; each one gets an
		 * extra reference so it stays valid while processed below
		 * without the lock held.
		 */
		spin_lock_irqsave(&cm_core->ht_lock, flags);
		list_for_each_safe(list_pos, list_temp, &cm_core->connected_nodes) {
			cm_node = container_of(list_pos, struct i40iw_cm_node, list);
			if ((cm_node->listener == listener) && !cm_node->accelerated) {
				atomic_inc(&cm_node->ref_count);
				list_add(&cm_node->reset_entry, &reset_list);
			}
		}
		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
	}

	list_for_each_safe(list_pos, list_temp, &reset_list) {
		cm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry);
		loopback = cm_node->loopbackpartner;
		if (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) {
			/* already closing; just drop the collection reference */
			i40iw_rem_ref_cm_node(cm_node);
		} else {
			if (!loopback) {
				i40iw_cleanup_retrans_entry(cm_node);
				err = i40iw_send_reset(cm_node);
				if (err) {
					cm_node->state = I40IW_CM_STATE_CLOSED;
					i40iw_pr_err("send reset\n");
				} else {
					old_state = cm_node->state;
					cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
					if (old_state != I40IW_CM_STATE_MPAREQ_RCVD)
						i40iw_rem_ref_cm_node(cm_node);
				}
			} else {
				/* loopback pair: report a connect error to the
				 * active-side partner and tear both down
				 */
				struct i40iw_cm_event event;

				event.cm_node = loopback;
				memcpy(event.cm_info.rem_addr,
				       loopback->rem_addr, sizeof(event.cm_info.rem_addr));
				memcpy(event.cm_info.loc_addr,
				       loopback->loc_addr, sizeof(event.cm_info.loc_addr));
				event.cm_info.rem_port = loopback->rem_port;
				event.cm_info.loc_port = loopback->loc_port;
				event.cm_info.cm_id = loopback->cm_id;
				event.cm_info.ipv4 = loopback->ipv4;
				atomic_inc(&loopback->ref_count);
				loopback->state = I40IW_CM_STATE_CLOSED;
				i40iw_event_connect_error(&event);
				cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
				i40iw_rem_ref_cm_node(cm_node);
			}
		}
	}

	if (!atomic_dec_return(&listener->ref_count)) {
		/* last reference: unlink and release the listener's resources */
		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
		list_del(&listener->list);
		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

		if (listener->iwdev) {
			/* delete apbvt only if no other listener shares the port */
			if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port))
				i40iw_manage_apbvt(listener->iwdev,
						   listener->loc_port,
						   I40IW_MANAGE_APBVT_DEL);

			memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
			nfo.loc_port = listener->loc_port;
			nfo.ipv4 = listener->ipv4;
			nfo.vlan_id = listener->vlan_id;
			nfo.user_pri = listener->user_pri;

			if (!list_empty(&listener->child_listen_list)) {
				/* multi-homed listener: remove every child qhash */
				i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
			} else {
				if (listener->qhash_set)
					i40iw_manage_qhash(listener->iwdev,
							   &nfo,
							   I40IW_QHASH_TYPE_TCP_SYN,
							   I40IW_QHASH_MANAGE_TYPE_DELETE,
							   NULL,
							   false);
			}
		}

		cm_core->stats_listen_destroyed++;
		kfree(listener);
		cm_core->stats_listen_nodes_destroyed++;
		listener = NULL;
		ret = 0;
	}

	if (listener) {
		if (atomic_read(&listener->pend_accepts_cnt) > 0)
			i40iw_debug(cm_core->dev,
				    I40IW_DEBUG_CM,
				    "%s: listener (%p) pending accepts=%u\n",
				    __func__,
				    listener,
				    atomic_read(&listener->pend_accepts_cnt));
	}

	return ret;
}
1918 | ||
/**
 * i40iw_cm_del_listen - delete a listener
 * @cm_core: cm's core
 * @listener: passive connection's listener
 * @apbvt_del: flag to delete apbvt
 *
 * Marks the listener passive, detaches its cm_id and drops the
 * reference, freeing hanging child nodes along the way.
 */
static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core,
			       struct i40iw_cm_listener *listener,
			       bool apbvt_del)
{
	listener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE;
	listener->cm_id = NULL;	/* going to be destroyed pretty soon */
	return i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
}
1933 | ||
/**
 * i40iw_addr_resolve_neigh - resolve neighbor address
 * @iwdev: iwarp device structure
 * @src_ip: local ip address (host byte order)
 * @dst_ip: remote ip address (host byte order)
 * @arpindex: existing arp table index for @dst_ip, or negative if none
 *
 * Looks up the IPv4 route/neighbour for the destination and, when the
 * neighbour is valid, refreshes the driver's ARP cache with its MAC.
 * Returns the resolved arp table index, or the original @arpindex when
 * resolution could not be completed (a neighbour event is kicked off
 * in that case).
 */
static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
				    u32 src_ip,
				    u32 dst_ip,
				    int arpindex)
{
	struct rtable *rt;
	struct neighbour *neigh;
	int rc = arpindex;
	struct net_device *netdev = iwdev->netdev;
	__be32 dst_ipaddr = htonl(dst_ip);
	__be32 src_ipaddr = htonl(src_ip);

	rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
	if (IS_ERR(rt)) {
		i40iw_pr_err("ip_route_output\n");
		return rc;
	}

	/* for bonded interfaces resolve via the bond master */
	if (netif_is_bond_slave(netdev))
		netdev = netdev_master_upper_dev_get(netdev);

	neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);

	rcu_read_lock();
	if (neigh) {
		if (neigh->nud_state & NUD_VALID) {
			if (arpindex >= 0) {
				if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
						     neigh->ha))
					/* Mac address same as arp table */
					goto resolve_neigh_exit;
				/* stale entry: drop it before re-adding below */
				i40iw_manage_arp_cache(iwdev,
						       iwdev->arp_table[arpindex].mac_addr,
						       &dst_ip,
						       true,
						       I40IW_ARP_DELETE);
			}

			i40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD);
			rc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE);
		} else {
			/* not yet valid: trigger resolution, caller keeps old index */
			neigh_event_send(neigh, NULL);
		}
	}
 resolve_neigh_exit:

	rcu_read_unlock();
	if (neigh)
		neigh_release(neigh);

	ip_rt_put(rt);
	return rc;
}
1994 | ||
1995 | /** | |
1996 | * i40iw_get_dst_ipv6 | |
1997 | */ | |
f27b4746 FL |
1998 | static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr, |
1999 | struct sockaddr_in6 *dst_addr) | |
2000 | { | |
2001 | struct dst_entry *dst; | |
2002 | struct flowi6 fl6; | |
2003 | ||
2004 | memset(&fl6, 0, sizeof(fl6)); | |
2005 | fl6.daddr = dst_addr->sin6_addr; | |
2006 | fl6.saddr = src_addr->sin6_addr; | |
2007 | if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) | |
2008 | fl6.flowi6_oif = dst_addr->sin6_scope_id; | |
2009 | ||
2010 | dst = ip6_route_output(&init_net, NULL, &fl6); | |
2011 | return dst; | |
2012 | } | |
f27b4746 FL |
2013 | |
2014 | /** | |
2015 | * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address | |
2016 | * @iwdev: iwarp device structure | |
2017 | * @dst_ip: remote ip address | |
2018 | * @arpindex: if there is an arp entry | |
2019 | */ | |
f27b4746 FL |
2020 | static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, |
2021 | u32 *src, | |
2022 | u32 *dest, | |
2023 | int arpindex) | |
2024 | { | |
2025 | struct neighbour *neigh; | |
2026 | int rc = arpindex; | |
2027 | struct net_device *netdev = iwdev->netdev; | |
2028 | struct dst_entry *dst; | |
2029 | struct sockaddr_in6 dst_addr; | |
2030 | struct sockaddr_in6 src_addr; | |
2031 | ||
2032 | memset(&dst_addr, 0, sizeof(dst_addr)); | |
2033 | dst_addr.sin6_family = AF_INET6; | |
2034 | i40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest); | |
2035 | memset(&src_addr, 0, sizeof(src_addr)); | |
2036 | src_addr.sin6_family = AF_INET6; | |
2037 | i40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src); | |
2038 | dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr); | |
2039 | if (!dst || dst->error) { | |
2040 | if (dst) { | |
2041 | dst_release(dst); | |
2042 | i40iw_pr_err("ip6_route_output returned dst->error = %d\n", | |
2043 | dst->error); | |
2044 | } | |
2045 | return rc; | |
2046 | } | |
2047 | ||
2048 | if (netif_is_bond_slave(netdev)) | |
2049 | netdev = netdev_master_upper_dev_get(netdev); | |
2050 | ||
2051 | neigh = dst_neigh_lookup(dst, &dst_addr); | |
2052 | ||
2053 | rcu_read_lock(); | |
2054 | if (neigh) { | |
2055 | i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "dst_neigh_lookup MAC=%pM\n", neigh->ha); | |
2056 | if (neigh->nud_state & NUD_VALID) { | |
2057 | if (arpindex >= 0) { | |
2058 | if (ether_addr_equal | |
2059 | (iwdev->arp_table[arpindex].mac_addr, | |
2060 | neigh->ha)) { | |
2061 | /* Mac address same as in arp table */ | |
2062 | goto resolve_neigh_exit6; | |
2063 | } | |
2064 | i40iw_manage_arp_cache(iwdev, | |
2065 | iwdev->arp_table[arpindex].mac_addr, | |
2066 | dest, | |
2067 | false, | |
2068 | I40IW_ARP_DELETE); | |
2069 | } | |
2070 | i40iw_manage_arp_cache(iwdev, | |
2071 | neigh->ha, | |
2072 | dest, | |
2073 | false, | |
2074 | I40IW_ARP_ADD); | |
2075 | rc = i40iw_arp_table(iwdev, | |
2076 | dest, | |
2077 | false, | |
2078 | NULL, | |
2079 | I40IW_ARP_RESOLVE); | |
2080 | } else { | |
2081 | neigh_event_send(neigh, NULL); | |
2082 | } | |
2083 | } | |
2084 | ||
2085 | resolve_neigh_exit6: | |
2086 | rcu_read_unlock(); | |
2087 | if (neigh) | |
2088 | neigh_release(neigh); | |
2089 | dst_release(dst); | |
2090 | return rc; | |
2091 | } | |
f27b4746 FL |
2092 | |
2093 | /** | |
2094 | * i40iw_ipv4_is_loopback - check if loopback | |
2095 | * @loc_addr: local addr to compare | |
2096 | * @rem_addr: remote address | |
2097 | */ | |
2098 | static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr) | |
2099 | { | |
2100 | return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr); | |
2101 | } | |
2102 | ||
2103 | /** | |
2104 | * i40iw_ipv6_is_loopback - check if loopback | |
2105 | * @loc_addr: local addr to compare | |
2106 | * @rem_addr: remote address | |
2107 | */ | |
2108 | static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr) | |
2109 | { | |
2110 | struct in6_addr raddr6; | |
2111 | ||
2112 | i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr); | |
f606d893 | 2113 | return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6); |
f27b4746 FL |
2114 | } |
2115 | ||
/**
 * i40iw_make_cm_node - create a new instance of a cm node
 * @cm_core: cm's core
 * @iwdev: iwarp device structure
 * @cm_info: quad info for connection
 * @listener: passive connection's listener (NULL for active open)
 *
 * Allocates and initializes a cm node from @cm_info, resolves the
 * remote MAC (via the ARP cache, or neighbour lookup for non-loopback
 * peers) and inserts the node into the connected-nodes hash table.
 * Returns the node with one reference held, or NULL on failure.
 */
static struct i40iw_cm_node *i40iw_make_cm_node(
				   struct i40iw_cm_core *cm_core,
				   struct i40iw_device *iwdev,
				   struct i40iw_cm_info *cm_info,
				   struct i40iw_cm_listener *listener)
{
	struct i40iw_cm_node *cm_node;
	struct timespec ts;
	int oldarpindex;
	int arpindex;
	struct net_device *netdev = iwdev->netdev;

	/* create an hte and cm_node for this instance */
	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
	if (!cm_node)
		return NULL;

	/* set our node specific transport info */
	cm_node->ipv4 = cm_info->ipv4;
	cm_node->vlan_id = cm_info->vlan_id;
	/* with DCB enabled, untagged traffic still needs a priority tag */
	if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)
		cm_node->vlan_id = 0;
	cm_node->tos = cm_info->tos;
	cm_node->user_pri = cm_info->user_pri;
	if (listener) {
		if (listener->tos != cm_info->tos)
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB,
				    "application TOS[%d] and remote client TOS[%d] mismatch\n",
				    listener->tos, cm_info->tos);
		/* take the higher of listener/client TOS and derive priority */
		cm_node->tos = max(listener->tos, cm_info->tos);
		cm_node->user_pri = rt_tos2priority(cm_node->tos);
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "listener: TOS:[%d] UP:[%d]\n",
			    cm_node->tos, cm_node->user_pri);
	}
	memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
	memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
	cm_node->loc_port = cm_info->loc_port;
	cm_node->rem_port = cm_info->rem_port;

	cm_node->mpa_frame_rev = iwdev->mpa_version;
	cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
	cm_node->ird_size = I40IW_MAX_IRD_SIZE;
	cm_node->ord_size = I40IW_MAX_ORD_SIZE;

	cm_node->listener = listener;
	cm_node->cm_id = cm_info->cm_id;
	ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
	spin_lock_init(&cm_node->retrans_list_lock);

	atomic_set(&cm_node->ref_count, 1);
	/* associate our parent CM core */
	cm_node->cm_core = cm_core;
	cm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID;
	cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
	cm_node->tcp_cntxt.rcv_wnd =
			I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
	/* nanosecond clock value serves as the initial TCP sequence number */
	ts = current_kernel_time();
	cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
	cm_node->tcp_cntxt.mss = iwdev->mss;

	cm_node->iwdev = iwdev;
	cm_node->dev = &iwdev->sc_dev;

	if ((cm_node->ipv4 &&
	     i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
	     (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr,
						       cm_node->rem_addr))) {
		/* loopback peer: the arp table entry must already exist */
		arpindex = i40iw_arp_table(iwdev,
					   cm_node->rem_addr,
					   false,
					   NULL,
					   I40IW_ARP_RESOLVE);
	} else {
		/* remote peer: resolve via neighbour subsystem, using any
		 * existing arp entry as the fallback/starting point
		 */
		oldarpindex = i40iw_arp_table(iwdev,
					      cm_node->rem_addr,
					      false,
					      NULL,
					      I40IW_ARP_RESOLVE);
		if (cm_node->ipv4)
			arpindex = i40iw_addr_resolve_neigh(iwdev,
							    cm_info->loc_addr[0],
							    cm_info->rem_addr[0],
							    oldarpindex);
		else if (IS_ENABLED(CONFIG_IPV6))
			arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
								 cm_info->loc_addr,
								 cm_info->rem_addr,
								 oldarpindex);
		else
			arpindex = -EINVAL;
	}
	if (arpindex < 0) {
		i40iw_pr_err("cm_node arpindex\n");
		kfree(cm_node);
		return NULL;
	}
	ether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr);
	i40iw_add_hte_node(cm_core, cm_node);
	cm_core->stats_nodes_created++;
	return cm_node;
}
2224 | ||
/**
 * i40iw_rem_ref_cm_node - destroy an instance of a cm node
 * @cm_node: connection's node
 *
 * Drops one reference; on the last reference the node is removed from
 * the hash table and all of its resources (apbvt, qhash, listener
 * reference, QP reference) are released before it is freed.
 */
static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
{
	struct i40iw_cm_core *cm_core = cm_node->cm_core;
	struct i40iw_qp *iwqp;
	struct i40iw_cm_info nfo;
	unsigned long flags;

	/* decrement and list removal must be atomic w.r.t. the hash table */
	spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
	if (atomic_dec_return(&cm_node->ref_count)) {
		spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
		return;
	}
	list_del(&cm_node->list);
	spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);

	/* if the node is destroyed before connection was accelerated */
	if (!cm_node->accelerated && cm_node->accept_pend) {
		pr_err("node destroyed before established\n");
		atomic_dec(&cm_node->listener->pend_accepts_cnt);
	}
	if (cm_node->close_entry)
		i40iw_handle_close_entry(cm_node, 0);
	if (cm_node->listener) {
		i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
	} else {
		/* active-side node owns its apbvt/qhash; release them if no
		 * other listener still uses the port
		 */
		if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
		    cm_node->apbvt_set) {
			i40iw_manage_apbvt(cm_node->iwdev,
					   cm_node->loc_port,
					   I40IW_MANAGE_APBVT_DEL);
			i40iw_get_addr_info(cm_node, &nfo);
			if (cm_node->qhash_set) {
				i40iw_manage_qhash(cm_node->iwdev,
						   &nfo,
						   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
						   I40IW_QHASH_MANAGE_TYPE_DELETE,
						   NULL,
						   false);
				cm_node->qhash_set = 0;
			}
		}
	}

	iwqp = cm_node->iwqp;
	if (iwqp) {
		iwqp->cm_node = NULL;
		i40iw_rem_ref(&iwqp->ibqp);
		cm_node->iwqp = NULL;
	} else if (cm_node->qhash_set) {
		/* no QP attached: remove the remaining qhash entry directly */
		i40iw_get_addr_info(cm_node, &nfo);
		i40iw_manage_qhash(cm_node->iwdev,
				   &nfo,
				   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
				   I40IW_QHASH_MANAGE_TYPE_DELETE,
				   NULL,
				   false);
		cm_node->qhash_set = 0;
	}

	cm_node->cm_core->stats_nodes_destroyed++;
	kfree(cm_node);
}
2291 | ||
/**
 * i40iw_handle_fin_pkt - FIN packet received
 * @cm_node: connection's node
 *
 * Advances the TCP close state machine according to the node's current
 * state when the peer's FIN arrives.
 */
static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node)
{
	u32 ret;

	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_RCVD:
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_MPAREJ_RCVD:
		/* peer closed first: consume FIN and send ours */
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_LAST_ACK;
		i40iw_send_fin(cm_node);
		break;
	case I40IW_CM_STATE_MPAREQ_SENT:
		/* active open aborted mid-MPA: notify and reset */
		i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_FIN_WAIT1:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSING;
		i40iw_send_ack(cm_node);
		/*
		 * Wait for ACK as this is simultaneous close.
		 * After we receive ACK, do not send anything.
		 * Just rm the node.
		 */
		break;
	case I40IW_CM_STATE_FIN_WAIT2:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_TIME_WAIT;
		i40iw_send_ack(cm_node);
		/* arm the close timer to reap the node after TIME_WAIT */
		ret =
		    i40iw_schedule_cm_timer(cm_node, NULL, I40IW_TIMER_TYPE_CLOSE, 1, 0);
		if (ret)
			i40iw_pr_err("node %p state = %d\n", cm_node, cm_node->state);
		break;
	case I40IW_CM_STATE_TIME_WAIT:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_rem_ref_cm_node(cm_node);
		break;
	case I40IW_CM_STATE_OFFLOADED:
	default:
		i40iw_pr_err("bad state node %p state = %d\n", cm_node, cm_node->state);
		break;
	}
}
2351 | ||
/**
 * i40iw_handle_rst_pkt - process received RST packet
 * @cm_node: connection's node
 * @rbuf: receive buffer
 *
 * Handles a peer RST according to the node's current state: downgrade
 * MPA version and retry, report open errors, or close the node.
 */
static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	i40iw_cleanup_retrans_entry(cm_node);
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_MPAREQ_SENT:
		switch (cm_node->mpa_frame_rev) {
		case IETF_MPA_V2:
			/* peer may not support MPA v2: fall back to v1 and retry */
			cm_node->mpa_frame_rev = IETF_MPA_V1;
			/* send a syn and goto syn sent state */
			cm_node->state = I40IW_CM_STATE_SYN_SENT;
			if (i40iw_send_syn(cm_node, 0))
				i40iw_active_open_err(cm_node, false);
			break;
		case IETF_MPA_V1:
		default:
			i40iw_active_open_err(cm_node, false);
			break;
		}
		break;
	case I40IW_CM_STATE_MPAREQ_RCVD:
		atomic_add_return(1, &cm_node->passive_state);
		break;
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_SYN_RCVD:
	case I40IW_CM_STATE_LISTENING:
		i40iw_pr_err("Bad state state = %d\n", cm_node->state);
		i40iw_passive_open_err(cm_node, false);
		break;
	case I40IW_CM_STATE_OFFLOADED:
		i40iw_active_open_err(cm_node, false);
		break;
	case I40IW_CM_STATE_CLOSED:
		break;
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_LAST_ACK:
		/* drop the cm_id ref, then share TIME_WAIT's close path */
		cm_node->cm_id->rem_ref(cm_node->cm_id);
		/* fall through */
	case I40IW_CM_STATE_TIME_WAIT:
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_rem_ref_cm_node(cm_node);
		break;
	default:
		break;
	}
}
2404 | ||
/**
 * i40iw_handle_rcv_mpa - Process a recv'd mpa buffer
 * @cm_node: connection's node
 * @rbuf: receive buffer
 *
 * Parses the MPA frame and raises the matching CM event: MPA_REQ on
 * the passive side, CONNECTED or MPA_REJECT on the active side.
 */
static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	int ret;
	int datasize = rbuf->datalen;
	u8 *dataloc = rbuf->data;

	enum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN;
	u32 res_type;

	ret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize);
	if (ret) {
		/* malformed MPA: report on whichever side we are */
		if (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT)
			i40iw_active_open_err(cm_node, true);
		else
			i40iw_passive_open_err(cm_node, true);
		return;
	}

	switch (cm_node->state) {
	case I40IW_CM_STATE_ESTABLISHED:
		/* passive side: this is the peer's MPA request */
		if (res_type == I40IW_MPA_REQUEST_REJECT)
			i40iw_pr_err("state for reject\n");
		cm_node->state = I40IW_CM_STATE_MPAREQ_RCVD;
		type = I40IW_CM_EVENT_MPA_REQ;
		i40iw_send_ack(cm_node);	/* ACK received MPA request */
		atomic_set(&cm_node->passive_state,
			   I40IW_PASSIVE_STATE_INDICATED);
		break;
	case I40IW_CM_STATE_MPAREQ_SENT:
		/* active side: this is the peer's MPA reply */
		i40iw_cleanup_retrans_entry(cm_node);
		if (res_type == I40IW_MPA_REQUEST_REJECT) {
			type = I40IW_CM_EVENT_MPA_REJECT;
			cm_node->state = I40IW_CM_STATE_MPAREJ_RCVD;
		} else {
			type = I40IW_CM_EVENT_CONNECTED;
			cm_node->state = I40IW_CM_STATE_OFFLOADED;
			i40iw_send_ack(cm_node);
		}
		break;
	default:
		pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
		break;
	}
	i40iw_create_event(cm_node, type);
}
2456 | ||
2457 | /** | |
2458 | * i40iw_indicate_pkt_err - Send up err event to cm | |
2459 | * @cm_node: connection's node | |
2460 | */ | |
2461 | static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node) | |
2462 | { | |
2463 | switch (cm_node->state) { | |
2464 | case I40IW_CM_STATE_SYN_SENT: | |
2465 | case I40IW_CM_STATE_MPAREQ_SENT: | |
2466 | i40iw_active_open_err(cm_node, true); | |
2467 | break; | |
2468 | case I40IW_CM_STATE_ESTABLISHED: | |
2469 | case I40IW_CM_STATE_SYN_RCVD: | |
2470 | i40iw_passive_open_err(cm_node, true); | |
2471 | break; | |
2472 | case I40IW_CM_STATE_OFFLOADED: | |
2473 | default: | |
2474 | break; | |
2475 | } | |
2476 | } | |
2477 | ||
2478 | /** | |
2479 | * i40iw_check_syn - Check for error on received syn ack | |
2480 | * @cm_node: connection's node | |
2481 | * @tcph: pointer tcp header | |
2482 | */ | |
2483 | static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph) | |
2484 | { | |
2485 | int err = 0; | |
2486 | ||
2487 | if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) { | |
2488 | err = 1; | |
2489 | i40iw_active_open_err(cm_node, true); | |
2490 | } | |
2491 | return err; | |
2492 | } | |
2493 | ||
2494 | /** | |
2495 | * i40iw_check_seq - check seq numbers if OK | |
2496 | * @cm_node: connection's node | |
2497 | * @tcph: pointer tcp header | |
2498 | */ | |
2499 | static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph) | |
2500 | { | |
2501 | int err = 0; | |
2502 | u32 seq; | |
2503 | u32 ack_seq; | |
2504 | u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num; | |
2505 | u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt; | |
2506 | u32 rcv_wnd; | |
2507 | ||
2508 | seq = ntohl(tcph->seq); | |
2509 | ack_seq = ntohl(tcph->ack_seq); | |
2510 | rcv_wnd = cm_node->tcp_cntxt.rcv_wnd; | |
2511 | if (ack_seq != loc_seq_num) | |
2512 | err = -1; | |
2513 | else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd))) | |
2514 | err = -1; | |
2515 | if (err) { | |
2516 | i40iw_pr_err("seq number\n"); | |
2517 | i40iw_indicate_pkt_err(cm_node); | |
2518 | } | |
2519 | return err; | |
2520 | } | |
2521 | ||
/**
 * i40iw_handle_syn_pkt - is for Passive node
 * @cm_node: connection's node
 * @rbuf: receive buffer
 *
 * Handles an incoming SYN: on a listening node it validates backlog
 * and TCP options, then moves to SYN_RCVD and installs the established
 * qhash entry; other states mostly drop or reset.
 */
static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	int ret;
	u32 inc_sequence;
	int optionsize;
	struct i40iw_cm_info nfo;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
	inc_sequence = ntohl(tcph->seq);

	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_MPAREQ_SENT:
		/* Rcvd syn on active open connection */
		i40iw_active_open_err(cm_node, 1);
		break;
	case I40IW_CM_STATE_LISTENING:
		/* Passive OPEN */
		if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
		    cm_node->listener->backlog) {
			cm_node->cm_core->stats_backlog_drops++;
			i40iw_passive_open_err(cm_node, false);
			break;
		}
		ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
		if (ret) {
			i40iw_passive_open_err(cm_node, false);
			/* drop pkt */
			break;
		}
		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
		cm_node->accept_pend = 1;
		atomic_inc(&cm_node->listener->pend_accepts_cnt);

		cm_node->state = I40IW_CM_STATE_SYN_RCVD;
		/* steer subsequent segments of this quad to this node */
		i40iw_get_addr_info(cm_node, &nfo);
		ret = i40iw_manage_qhash(cm_node->iwdev,
					 &nfo,
					 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
					 I40IW_QHASH_MANAGE_TYPE_ADD,
					 (void *)cm_node,
					 false);
		cm_node->qhash_set = true;
		break;
	case I40IW_CM_STATE_CLOSED:
		i40iw_cleanup_retrans_entry(cm_node);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_OFFLOADED:
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_MPAREQ_RCVD:
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_CLOSING:
	case I40IW_CM_STATE_UNKNOWN:
	default:
		break;
	}
}
2590 | ||
/**
 * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side)
 * @cm_node: connection's node
 * @rbuf: receive buffer
 *
 * On SYN_SENT this completes the three-way handshake (ACK + MPA
 * request); in closed/listening states it answers with a reset.
 */
static void i40iw_handle_synack_pkt(struct i40iw_cm_node *cm_node,
				    struct i40iw_puda_buf *rbuf)
{
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	int ret;
	u32 inc_sequence;
	int optionsize;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
	inc_sequence = ntohl(tcph->seq);
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
		i40iw_cleanup_retrans_entry(cm_node);
		/* active open */
		if (i40iw_check_syn(cm_node, tcph)) {
			i40iw_pr_err("check syn fail\n");
			return;
		}
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		/* setup options */
		ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0);
		if (ret) {
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "cm_node=%p tcp_options failed\n",
				    cm_node);
			break;
		}
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
		i40iw_send_ack(cm_node);	/* ACK for the syn_ack */
		ret = i40iw_send_mpa_request(cm_node);
		if (ret) {
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "cm_node=%p i40iw_send_mpa_request failed\n",
				    cm_node);
			break;
		}
		cm_node->state = I40IW_CM_STATE_MPAREQ_SENT;
		break;
	case I40IW_CM_STATE_MPAREQ_RCVD:
		i40iw_passive_open_err(cm_node, true);
		break;
	case I40IW_CM_STATE_LISTENING:
		/* unexpected on a listener: reset the peer */
		cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_CLOSED:
		cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
		i40iw_cleanup_retrans_entry(cm_node);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_OFFLOADED:
	case I40IW_CM_STATE_CLOSING:
	case I40IW_CM_STATE_UNKNOWN:
	case I40IW_CM_STATE_MPAREQ_SENT:
	default:
		break;
	}
}
2664 | ||
/**
 * i40iw_handle_ack_pkt - process packet with ACK
 * @cm_node: connection's node
 * @rbuf: receive buffer
 *
 * Validates the sequence number, then advances the TCP state machine.
 * Return: 0 on success, -EINVAL on a bad sequence number, or the error
 * from TCP option processing.
 */
static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
				struct i40iw_puda_buf *rbuf)
{
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	u32 inc_sequence;
	int ret = 0;
	int optionsize;
	u32 datasize = rbuf->datalen;

	/* TCP option bytes beyond the fixed 20-byte header */
	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);

	/* drop out-of-window segments */
	if (i40iw_check_seq(cm_node, tcph))
		return -EINVAL;

	inc_sequence = ntohl(tcph->seq);
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_RCVD:
		/* passive open: this ACK completes the 3-way handshake */
		i40iw_cleanup_retrans_entry(cm_node);
		ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
		if (ret)
			break;
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		cm_node->state = I40IW_CM_STATE_ESTABLISHED;
		if (datasize) {
			/* payload may already carry the MPA request */
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			i40iw_handle_rcv_mpa(cm_node, rbuf);
		}
		break;
	case I40IW_CM_STATE_ESTABLISHED:
		i40iw_cleanup_retrans_entry(cm_node);
		if (datasize) {
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			i40iw_handle_rcv_mpa(cm_node, rbuf);
		}
		break;
	case I40IW_CM_STATE_MPAREQ_SENT:
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		if (datasize) {
			/* payload carries the MPA reply */
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			i40iw_handle_rcv_mpa(cm_node, rbuf);
		}
		break;
	case I40IW_CM_STATE_LISTENING:
		/* plain ACK on a listener node: reset */
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_CLOSED:
		i40iw_cleanup_retrans_entry(cm_node);
		/* take a reference for the reset of an already-closed node */
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_CLOSING:
		/* final ACK of the close sequence: release the node */
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		if (!cm_node->accept_pend)
			cm_node->cm_id->rem_ref(cm_node->cm_id);
		i40iw_rem_ref_cm_node(cm_node);
		break;
	case I40IW_CM_STATE_FIN_WAIT1:
		/* our FIN has been ACKed */
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_FIN_WAIT2;
		break;
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_OFFLOADED:
	case I40IW_CM_STATE_MPAREQ_RCVD:
	case I40IW_CM_STATE_UNKNOWN:
	default:
		i40iw_cleanup_retrans_entry(cm_node);
		break;
	}
	return ret;
}
2745 | ||
2746 | /** | |
2747 | * i40iw_process_packet - process cm packet | |
2748 | * @cm_node: connection's node | |
2749 | * @rbuf: receive buffer | |
2750 | */ | |
2751 | static void i40iw_process_packet(struct i40iw_cm_node *cm_node, | |
2752 | struct i40iw_puda_buf *rbuf) | |
2753 | { | |
2754 | enum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN; | |
2755 | struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; | |
2756 | u32 fin_set = 0; | |
2757 | int ret; | |
2758 | ||
2759 | if (tcph->rst) { | |
2760 | pkt_type = I40IW_PKT_TYPE_RST; | |
2761 | } else if (tcph->syn) { | |
2762 | pkt_type = I40IW_PKT_TYPE_SYN; | |
2763 | if (tcph->ack) | |
2764 | pkt_type = I40IW_PKT_TYPE_SYNACK; | |
2765 | } else if (tcph->ack) { | |
2766 | pkt_type = I40IW_PKT_TYPE_ACK; | |
2767 | } | |
2768 | if (tcph->fin) | |
2769 | fin_set = 1; | |
2770 | ||
2771 | switch (pkt_type) { | |
2772 | case I40IW_PKT_TYPE_SYN: | |
2773 | i40iw_handle_syn_pkt(cm_node, rbuf); | |
2774 | break; | |
2775 | case I40IW_PKT_TYPE_SYNACK: | |
2776 | i40iw_handle_synack_pkt(cm_node, rbuf); | |
2777 | break; | |
2778 | case I40IW_PKT_TYPE_ACK: | |
2779 | ret = i40iw_handle_ack_pkt(cm_node, rbuf); | |
2780 | if (fin_set && !ret) | |
2781 | i40iw_handle_fin_pkt(cm_node); | |
2782 | break; | |
2783 | case I40IW_PKT_TYPE_RST: | |
2784 | i40iw_handle_rst_pkt(cm_node, rbuf); | |
2785 | break; | |
2786 | default: | |
2787 | if (fin_set && | |
2788 | (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph))) | |
2789 | i40iw_handle_fin_pkt(cm_node); | |
2790 | break; | |
2791 | } | |
2792 | } | |
2793 | ||
/**
 * i40iw_make_listen_node - create a listen node with params
 * @cm_core: cm's core
 * @iwdev: iwarp device structure
 * @cm_info: quad info for connection
 *
 * Reuses an existing non-active listener matching the same
 * address/port/vlan when one exists; otherwise allocates a new one.
 * Return: the (re)activated listener, or NULL if an ACTIVE listener
 * already exists or allocation fails.
 */
static struct i40iw_cm_listener *i40iw_make_listen_node(
					struct i40iw_cm_core *cm_core,
					struct i40iw_device *iwdev,
					struct i40iw_cm_info *cm_info)
{
	struct i40iw_cm_listener *listener;
	unsigned long flags;

	/* cannot have multiple matching listeners */
	listener = i40iw_find_listener(cm_core, cm_info->loc_addr,
				       cm_info->loc_port,
				       cm_info->vlan_id,
				       I40IW_CM_LISTENER_EITHER_STATE);
	if (listener &&
	    (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) {
		/* drop the reference taken by i40iw_find_listener() */
		atomic_dec(&listener->ref_count);
		i40iw_debug(cm_core->dev,
			    I40IW_DEBUG_CM,
			    "Not creating listener since it already exists\n");
		return NULL;
	}

	if (!listener) {
		/* create a CM listen node (1/2 node to compare incoming traffic to) */
		listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
		if (!listener)
			return NULL;
		cm_core->stats_listen_nodes_created++;
		memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr));
		listener->loc_port = cm_info->loc_port;

		INIT_LIST_HEAD(&listener->child_listen_list);

		atomic_set(&listener->ref_count, 1);
	} else {
		/* recycle a non-active match (keeps find_listener's ref) */
		listener->reused_node = 1;
	}

	listener->cm_id = cm_info->cm_id;
	listener->ipv4 = cm_info->ipv4;
	listener->vlan_id = cm_info->vlan_id;
	atomic_set(&listener->pend_accepts_cnt, 0);
	listener->cm_core = cm_core;
	listener->iwdev = iwdev;

	listener->backlog = cm_info->backlog;
	listener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE;

	if (!listener->reused_node) {
		/* only a freshly allocated node must be linked into the list */
		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
		list_add(&listener->list, &cm_core->listen_nodes);
		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
	}

	return listener;
}
2856 | ||
/**
 * i40iw_create_cm_node - make a connection node with params
 * @cm_core: cm's core
 * @iwdev: iwarp device structure
 * @private_data_len: len of private data for mpa request
 * @private_data: pointer to private data for connection
 * @cm_info: quad info for connection
 *
 * Builds the active (client) side node. When local and remote addresses
 * are identical the connection is looped back in software: a partner
 * node is created against the local listener and the two TCP contexts
 * are cross-wired instead of putting a SYN on the wire.
 * Return: the new node, or NULL on allocation failure.
 */
static struct i40iw_cm_node *i40iw_create_cm_node(
				struct i40iw_cm_core *cm_core,
				struct i40iw_device *iwdev,
				u16 private_data_len,
				void *private_data,
				struct i40iw_cm_info *cm_info)
{
	struct i40iw_cm_node *cm_node;
	struct i40iw_cm_listener *loopback_remotelistener;
	struct i40iw_cm_node *loopback_remotenode;
	struct i40iw_cm_info loopback_cm_info;

	/* create a CM connection node */
	cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
	if (!cm_node)
		return NULL;
	/* set our node side to client (active) side */
	cm_node->tcp_cntxt.client = 1;
	cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;

	if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
		/* loopback: look for an active local listener on the "remote" quad */
		loopback_remotelistener = i40iw_find_listener(
						cm_core,
						cm_info->rem_addr,
						cm_node->rem_port,
						cm_node->vlan_id,
						I40IW_CM_LISTENER_ACTIVE_STATE);
		if (!loopback_remotelistener) {
			i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
		} else {
			/* mirror the port quad for the passive-side node */
			loopback_cm_info = *cm_info;
			loopback_cm_info.loc_port = cm_info->rem_port;
			loopback_cm_info.rem_port = cm_info->loc_port;
			loopback_cm_info.cm_id = loopback_remotelistener->cm_id;
			loopback_cm_info.ipv4 = cm_info->ipv4;
			loopback_remotenode = i40iw_make_cm_node(cm_core,
								 iwdev,
								 &loopback_cm_info,
								 loopback_remotelistener);
			if (!loopback_remotenode) {
				i40iw_rem_ref_cm_node(cm_node);
				return NULL;
			}
			cm_core->stats_loopbacks++;
			loopback_remotenode->loopbackpartner = cm_node;
			loopback_remotenode->tcp_cntxt.rcv_wscale =
				I40IW_CM_DEFAULT_RCV_WND_SCALE;
			cm_node->loopbackpartner = loopback_remotenode;
			memcpy(loopback_remotenode->pdata_buf, private_data,
			       private_data_len);
			loopback_remotenode->pdata.size = private_data_len;

			/* no wire traffic: hand-craft both sides' TCP state
			 * (sequence numbers, windows and scales are swapped
			 * between the two partner nodes)
			 */
			cm_node->state = I40IW_CM_STATE_OFFLOADED;
			cm_node->tcp_cntxt.rcv_nxt =
				loopback_remotenode->tcp_cntxt.loc_seq_num;
			loopback_remotenode->tcp_cntxt.rcv_nxt =
				cm_node->tcp_cntxt.loc_seq_num;
			cm_node->tcp_cntxt.max_snd_wnd =
				loopback_remotenode->tcp_cntxt.rcv_wnd;
			loopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
			cm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd;
			loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
			cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
			loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
			loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
			i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
		}
		return cm_node;
	}

	cm_node->pdata.size = private_data_len;
	cm_node->pdata.addr = cm_node->pdata_buf;

	memcpy(cm_node->pdata_buf, private_data, private_data_len);

	cm_node->state = I40IW_CM_STATE_SYN_SENT;
	return cm_node;
}
2943 | ||
/**
 * i40iw_cm_reject - reject and teardown a connection
 * @cm_node: connection's node
 * @pdata: ptr to private data for reject
 * @plen: size of private data
 *
 * Only meaningful on the passive side (returns 0 immediately for a
 * client node). Sends an MPA reject, falling back to a reset if the
 * reject cannot be sent; a loopback partner instead receives a
 * CONNECT_REPLY event with -ECONNREFUSED.
 * Return: 0, or the error from sending the reject/event.
 */
static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)
{
	int ret = 0;
	int err;
	int passive_state;
	struct iw_cm_id *cm_id = cm_node->cm_id;
	struct i40iw_cm_node *loopback = cm_node->loopbackpartner;

	if (cm_node->tcp_cntxt.client)
		return ret;
	i40iw_cleanup_retrans_entry(cm_node);

	if (!loopback) {
		passive_state = atomic_add_return(1, &cm_node->passive_state);
		if (passive_state == I40IW_SEND_RESET_EVENT) {
			/* a reset is already pending; just close the node */
			cm_node->state = I40IW_CM_STATE_CLOSED;
			i40iw_rem_ref_cm_node(cm_node);
		} else {
			if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
				i40iw_rem_ref_cm_node(cm_node);
			} else {
				ret = i40iw_send_mpa_reject(cm_node, pdata, plen);
				if (ret) {
					/* reject could not be sent: fall back to RST */
					cm_node->state = I40IW_CM_STATE_CLOSED;
					err = i40iw_send_reset(cm_node);
					if (err)
						i40iw_pr_err("send reset failed\n");
				} else {
					cm_id->add_ref(cm_id);
				}
			}
		}
	} else {
		/* loopback: notify the partner directly, no wire traffic */
		cm_node->cm_id = NULL;
		if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
			i40iw_rem_ref_cm_node(cm_node);
			i40iw_rem_ref_cm_node(loopback);
		} else {
			ret = i40iw_send_cm_event(loopback,
						  loopback->cm_id,
						  IW_CM_EVENT_CONNECT_REPLY,
						  -ECONNREFUSED);
			i40iw_rem_ref_cm_node(cm_node);
			loopback->state = I40IW_CM_STATE_CLOSING;

			/* cache cm_id: the node may be freed by rem_ref below */
			cm_id = loopback->cm_id;
			i40iw_rem_ref_cm_node(loopback);
			cm_id->rem_ref(cm_id);
		}
	}

	return ret;
}
3003 | ||
/**
 * i40iw_cm_close - close of cm connection
 * @cm_node: connection's node
 *
 * Drives the teardown action appropriate for the node's current TCP
 * state. Return: 0 on success, -EINVAL for a NULL node, -1 when a
 * close is already in flight and no action applies.
 */
static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
{
	int ret = 0;

	if (!cm_node)
		return -EINVAL;

	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_RCVD:
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_ONE_SIDE_ESTABLISHED:
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_ACCEPTING:
	case I40IW_CM_STATE_MPAREQ_SENT:
	case I40IW_CM_STATE_MPAREQ_RCVD:
		/* connection setup in progress: abort with a reset */
		i40iw_cleanup_retrans_entry(cm_node);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_CLOSE_WAIT:
		/* peer already sent FIN: send ours and await the final ACK */
		cm_node->state = I40IW_CM_STATE_LAST_ACK;
		i40iw_send_fin(cm_node);
		break;
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_TIME_WAIT:
	case I40IW_CM_STATE_CLOSING:
		/* close already in flight */
		ret = -1;
		break;
	case I40IW_CM_STATE_LISTENING:
		i40iw_cleanup_retrans_entry(cm_node);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_MPAREJ_RCVD:
	case I40IW_CM_STATE_UNKNOWN:
	case I40IW_CM_STATE_INITED:
	case I40IW_CM_STATE_CLOSED:
	case I40IW_CM_STATE_LISTENER_DESTROYED:
		/* nothing on the wire to tear down: just drop the reference */
		i40iw_rem_ref_cm_node(cm_node);
		break;
	case I40IW_CM_STATE_OFFLOADED:
		if (cm_node->send_entry)
			i40iw_pr_err("send_entry\n");
		i40iw_rem_ref_cm_node(cm_node);
		break;
	}
	return ret;
}
3056 | ||
/**
 * i40iw_receive_ilq - recv an ETHERNET packet, and process it
 * through CM
 * @dev: FPK dev struct
 * @rbuf: receive buffer
 *
 * Parses the VLAN/IP/TCP headers out of an ILQ buffer, locates the
 * matching CM node (or creates one for a SYN aimed at an active
 * listener) and feeds the segment to the CM state machine.
 */
void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
{
	struct i40iw_cm_node *cm_node;
	struct i40iw_cm_listener *listener;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcphdr *tcph;
	struct i40iw_cm_info cm_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	struct vlan_ethhdr *ethh;
	u16 vtag;

	/* if vlan, then maclen = 18 else 14 */
	iph = (struct iphdr *)rbuf->iph;
	memset(&cm_info, 0, sizeof(cm_info));

	i40iw_debug_buf(dev,
			I40IW_DEBUG_ILQ,
			"RECEIVE ILQ BUFFER",
			rbuf->mem.va,
			rbuf->totallen);
	ethh = (struct vlan_ethhdr *)rbuf->mem.va;

	if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
		/* extract priority and VLAN id from the 802.1Q tag */
		vtag = ntohs(ethh->h_vlan_TCI);
		cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		cm_info.vlan_id = vtag & VLAN_VID_MASK;
		i40iw_debug(cm_core->dev,
			    I40IW_DEBUG_CM,
			    "%s vlan_id=%d\n",
			    __func__,
			    cm_info.vlan_id);
	} else {
		cm_info.vlan_id = I40IW_NO_VLAN;
	}
	tcph = (struct tcphdr *)rbuf->tcph;

	if (rbuf->ipv4) {
		cm_info.loc_addr[0] = ntohl(iph->daddr);
		cm_info.rem_addr[0] = ntohl(iph->saddr);
		cm_info.ipv4 = true;
		cm_info.tos = iph->tos;
	} else {
		ip6h = (struct ipv6hdr *)rbuf->iph;
		i40iw_copy_ip_ntohl(cm_info.loc_addr,
				    ip6h->daddr.in6_u.u6_addr32);
		i40iw_copy_ip_ntohl(cm_info.rem_addr,
				    ip6h->saddr.in6_u.u6_addr32);
		cm_info.ipv4 = false;
		/* IPv6 traffic class spans priority and the top of flow_lbl[0] */
		cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
	}
	cm_info.loc_port = ntohs(tcph->dest);
	cm_info.rem_port = ntohs(tcph->source);
	cm_node = i40iw_find_node(cm_core,
				  cm_info.rem_port,
				  cm_info.rem_addr,
				  cm_info.loc_port,
				  cm_info.loc_addr,
				  true);

	if (!cm_node) {
		/* Only type of packet accepted are for */
		/* the PASSIVE open (syn only) */
		if (!tcph->syn || tcph->ack)
			return;
		listener =
		    i40iw_find_listener(cm_core,
					cm_info.loc_addr,
					cm_info.loc_port,
					cm_info.vlan_id,
					I40IW_CM_LISTENER_ACTIVE_STATE);
		if (!listener) {
			cm_info.cm_id = NULL;
			i40iw_debug(cm_core->dev,
				    I40IW_DEBUG_CM,
				    "%s no listener found\n",
				    __func__);
			return;
		}
		cm_info.cm_id = listener->cm_id;
		cm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener);
		if (!cm_node) {
			i40iw_debug(cm_core->dev,
				    I40IW_DEBUG_CM,
				    "%s allocate node failed\n",
				    __func__);
			/* drop the reference taken by i40iw_find_listener() */
			atomic_dec(&listener->ref_count);
			return;
		}
		if (!tcph->rst && !tcph->fin) {
			cm_node->state = I40IW_CM_STATE_LISTENING;
		} else {
			/* RST/FIN on a brand-new node: discard it */
			i40iw_rem_ref_cm_node(cm_node);
			return;
		}
		/* extra reference balances the rem_ref after processing below */
		atomic_inc(&cm_node->ref_count);
	} else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) {
		/* offloaded connections are owned by hardware; ignore */
		i40iw_rem_ref_cm_node(cm_node);
		return;
	}
	i40iw_process_packet(cm_node, rbuf);
	i40iw_rem_ref_cm_node(cm_node);
}
3167 | ||
/**
 * i40iw_setup_cm_core - allocate a top level instance of a cm
 * core
 * @iwdev: iwarp device structure
 *
 * Initializes the connection-manager lists, locks, the TCP retransmit
 * timer and the two ordered work queues (CM events and disconnects).
 *
 * NOTE(review): the alloc_ordered_workqueue() results are unchecked;
 * on allocation failure event_wq/disconn_wq remain NULL and later
 * queue_work()/destroy_workqueue() calls would oops - confirm whether
 * an error should be propagated to the caller.
 */
void i40iw_setup_cm_core(struct i40iw_device *iwdev)
{
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;

	cm_core->iwdev = iwdev;
	cm_core->dev = &iwdev->sc_dev;

	INIT_LIST_HEAD(&cm_core->connected_nodes);
	INIT_LIST_HEAD(&cm_core->listen_nodes);

	/* periodic tick drives CM retransmits and timeouts */
	init_timer(&cm_core->tcp_timer);
	cm_core->tcp_timer.function = i40iw_cm_timer_tick;
	cm_core->tcp_timer.data = (unsigned long)cm_core;

	spin_lock_init(&cm_core->ht_lock);
	spin_lock_init(&cm_core->listen_list_lock);

	/* ordered queues: events for one connection must not be reordered */
	cm_core->event_wq = alloc_ordered_workqueue("iwewq",
						    WQ_MEM_RECLAIM);

	cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
						      WQ_MEM_RECLAIM);
}
3196 | ||
3197 | /** | |
3198 | * i40iw_cleanup_cm_core - deallocate a top level instance of a | |
3199 | * cm core | |
3200 | * @cm_core: cm's core | |
3201 | */ | |
3202 | void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core) | |
3203 | { | |
3204 | unsigned long flags; | |
3205 | ||
3206 | if (!cm_core) | |
3207 | return; | |
3208 | ||
3209 | spin_lock_irqsave(&cm_core->ht_lock, flags); | |
3210 | if (timer_pending(&cm_core->tcp_timer)) | |
3211 | del_timer_sync(&cm_core->tcp_timer); | |
3212 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | |
3213 | ||
3214 | destroy_workqueue(cm_core->event_wq); | |
3215 | destroy_workqueue(cm_core->disconn_wq); | |
3216 | } | |
3217 | ||
/**
 * i40iw_init_tcp_ctx - setup qp context
 * @cm_node: connection's node
 * @tcp_info: offload info for tcp
 * @iwqp: associate qp for the connection
 *
 * Translates the software CM node's TCP state into the little-endian
 * hardware offload context, including sequence numbers, windows,
 * addresses/ports and the resolved ARP table index.
 */
static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
			       struct i40iw_tcp_offload_info *tcp_info,
			       struct i40iw_qp *iwqp)
{
	tcp_info->ipv4 = cm_node->ipv4;
	tcp_info->drop_ooo_seg = true;
	tcp_info->wscale = true;
	tcp_info->ignore_tcp_opt = true;
	tcp_info->ignore_tcp_uns_opt = true;
	tcp_info->no_nagle = false;

	tcp_info->ttl = I40IW_DEFAULT_TTL;
	tcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR);
	tcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH);
	tcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH;

	tcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED;
	tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
	tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;

	tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
	tcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
	tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
	tcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);

	tcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
	/* initial congestion window of 2 segments */
	tcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);
	tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
	tcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
	tcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
	/* hardware expects the unscaled receive window */
	tcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
					cm_node->tcp_cntxt.rcv_wscale);

	tcp_info->flow_label = 0;
	tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
	if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
		tcp_info->insert_vlan_tag = true;
		tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id);
	}
	if (cm_node->ipv4) {
		tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
		tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);

		/* IPv4 uses only the last address word */
		tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]);
		tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]);
		tcp_info->arp_idx =
			cpu_to_le16((u16)i40iw_arp_table(
							 iwqp->iwdev,
							 &tcp_info->dest_ip_addr3,
							 true,
							 NULL,
							 I40IW_ARP_RESOLVE));
	} else {
		tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
		tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
		tcp_info->dest_ip_addr0 = cpu_to_le32(cm_node->rem_addr[0]);
		tcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]);
		tcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]);
		tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]);
		tcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]);
		tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]);
		tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]);
		tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]);
		tcp_info->arp_idx =
			cpu_to_le16((u16)i40iw_arp_table(
							 iwqp->iwdev,
							 &tcp_info->dest_ip_addr0,
							 false,
							 NULL,
							 I40IW_ARP_RESOLVE));
	}
}
3296 | ||
/**
 * i40iw_cm_init_tsa_conn - setup qp for RTS
 * @iwqp: associate qp for the connection
 * @cm_node: connection's node
 *
 * Fills the iWARP and TCP portions of the QP host context from the CM
 * node and pushes the context to hardware, marking the node OFFLOADED.
 */
static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
				   struct i40iw_cm_node *cm_node)
{
	struct i40iw_tcp_offload_info tcp_info;
	struct i40iwarp_offload_info *iwarp_info;
	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iw_device *iwdev = iwqp->iwdev;
	struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;

	memset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info));
	iwarp_info = &iwqp->iwarp_info;
	ctx_info = &iwqp->ctx_info;

	ctx_info->tcp_info = &tcp_info;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;

	iwarp_info->ord_size = cm_node->ord_size;
	iwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size);

	/* minimum outbound read depth of 2 */
	if (iwarp_info->ord_size == 1)
		iwarp_info->ord_size = 2;

	iwarp_info->rd_enable = true;
	iwarp_info->rdmap_ver = 1;
	iwarp_info->ddp_ver = 1;

	iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;

	ctx_info->tcp_info_valid = true;
	ctx_info->iwarp_info_valid = true;
	ctx_info->add_to_qoslist = true;
	ctx_info->user_pri = cm_node->user_pri;

	i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
	if (cm_node->snd_mark_en) {
		/* marker placement is relative to snd_nxt plus the LSMM */
		iwarp_info->snd_mark_en = true;
		iwarp_info->snd_mark_offset = (tcp_info.snd_nxt &
				SNDMARKER_SEQNMASK) + cm_node->lsmm_size;
	}

	cm_node->state = I40IW_CM_STATE_OFFLOADED;
	tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
	tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
	tcp_info.tos = cm_node->tos;

	dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);

	/* once tcp_info is set, no need to do it again */
	ctx_info->tcp_info_valid = false;
	ctx_info->iwarp_info_valid = false;
	ctx_info->add_to_qoslist = false;
}
3355 | ||
3356 | /** | |
3357 | * i40iw_cm_disconn - when a connection is being closed | |
3358 | * @iwqp: associate qp for the connection | |
3359 | */ | |
3360 | int i40iw_cm_disconn(struct i40iw_qp *iwqp) | |
3361 | { | |
3362 | struct disconn_work *work; | |
3363 | struct i40iw_device *iwdev = iwqp->iwdev; | |
3364 | struct i40iw_cm_core *cm_core = &iwdev->cm_core; | |
3365 | ||
3366 | work = kzalloc(sizeof(*work), GFP_ATOMIC); | |
3367 | if (!work) | |
3368 | return -ENOMEM; /* Timer will clean up */ | |
3369 | ||
3370 | i40iw_add_ref(&iwqp->ibqp); | |
3371 | work->iwqp = iwqp; | |
3372 | INIT_WORK(&work->work, i40iw_disconnect_worker); | |
3373 | queue_work(cm_core->disconn_wq, &work->work); | |
3374 | return 0; | |
3375 | } | |
3376 | ||
/**
 * i40iw_qp_disconnect - free qp and close cm
 * @iwqp: associate qp for the connection
 *
 * Releases passive-side LSMM resources (MR and DMA buffer) and closes
 * the associated CM node if it is still active.
 */
static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
{
	struct i40iw_device *iwdev;
	struct i40iw_ib_device *iwibdev;

	iwdev = to_iwdev(iwqp->ibqp.device);
	if (!iwdev) {
		i40iw_pr_err("iwdev == NULL\n");
		return;
	}

	iwibdev = iwdev->iwibdev;

	if (iwqp->active_conn) {
		/* indicate this connection is NOT active */
		iwqp->active_conn = 0;
	} else {
		/* Need to free the Last Streaming Mode Message */
		if (iwqp->ietf_mem.va) {
			if (iwqp->lsmm_mr)
				iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr);
			i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
		}
	}

	/* close the CM node down if it is still active */
	if (iwqp->cm_node) {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "%s Call close API\n", __func__);
		i40iw_cm_close(iwqp->cm_node);
	}
}
3412 | ||
/**
 * i40iw_cm_disconn_true - called by worker thread to disconnect qp
 * @iwqp: associate qp for the connection
 *
 * Inspects the QP's terminate flags, HW TCP state and last asynchronous
 * event (under iwqp->lock) to decide whether to flush the queues, issue
 * an IW_CM_EVENT_DISCONNECT, and/or issue an IW_CM_EVENT_CLOSE.
 */
static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
{
	struct iw_cm_id *cm_id;
	struct i40iw_device *iwdev;
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;
	u16 last_ae;
	u8 original_hw_tcp_state;
	u8 original_ibqp_state;
	int disconn_status = 0;
	int issue_disconn = 0;
	int issue_close = 0;
	int issue_flush = 0;
	struct ib_event ibevent;
	unsigned long flags;
	int ret;

	if (!iwqp) {
		i40iw_pr_err("iwqp == NULL\n");
		return;
	}

	spin_lock_irqsave(&iwqp->lock, flags);
	cm_id = iwqp->cm_id;
	/* make sure we havent already closed this connection */
	if (!cm_id) {
		spin_unlock_irqrestore(&iwqp->lock, flags);
		return;
	}

	iwdev = to_iwdev(iwqp->ibqp.device);

	/* snapshot state under the lock; decisions below use the snapshot */
	original_hw_tcp_state = iwqp->hw_tcp_state;
	original_ibqp_state = iwqp->ibqp_state;
	last_ae = iwqp->last_aeq;

	if (qp->term_flags) {
		issue_disconn = 1;
		issue_close = 1;
		iwqp->cm_id = NULL;
		/*When term timer expires after cm_timer, don't want
		 *terminate-handler to issue cm_disconn which can re-free
		 *a QP even after its refcnt=0.
		 */
		del_timer(&iwqp->terminate_timer);
		if (!iwqp->flush_issued) {
			iwqp->flush_issued = 1;
			issue_flush = 1;
		}
	} else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) ||
		   ((original_ibqp_state == IB_QPS_RTS) &&
		    (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
		/* peer closed or connection reset while in RTS */
		issue_disconn = 1;
		if (last_ae == I40IW_AE_LLP_CONNECTION_RESET)
			disconn_status = -ECONNRESET;
	}

	if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
	     (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
	     (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
	     (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
		/* connection fully gone: issue CLOSE and detach the cm_id */
		issue_close = 1;
		iwqp->cm_id = NULL;
		if (!iwqp->flush_issued) {
			iwqp->flush_issued = 1;
			issue_flush = 1;
		}
	}

	spin_unlock_irqrestore(&iwqp->lock, flags);
	if (issue_flush && !iwqp->destroyed) {
		/* Flush the queues */
		i40iw_flush_wqes(iwdev, iwqp);

		if (qp->term_flags && iwqp->ibqp.event_handler) {
			/* report the terminate reason upward as an IB event */
			ibevent.device = iwqp->ibqp.device;
			ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
					IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
			ibevent.element.qp = &iwqp->ibqp;
			iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
		}
	}

	if (cm_id && cm_id->event_handler) {
		if (issue_disconn) {
			ret = i40iw_send_cm_event(NULL,
						  cm_id,
						  IW_CM_EVENT_DISCONNECT,
						  disconn_status);

			if (ret)
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "disconnect event failed %s: - cm_id = %p\n",
					    __func__, cm_id);
		}
		if (issue_close) {
			i40iw_qp_disconnect(iwqp);
			cm_id->provider_data = iwqp;
			ret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0);
			if (ret)
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "close event failed %s: - cm_id = %p\n",
					    __func__, cm_id);
			/* drop the reference held since connection setup */
			cm_id->rem_ref(cm_id);
		}
	}
}
3525 | ||
3526 | /** | |
3527 | * i40iw_disconnect_worker - worker for connection close | |
3528 | * @work: points or disconn structure | |
3529 | */ | |
3530 | static void i40iw_disconnect_worker(struct work_struct *work) | |
3531 | { | |
3532 | struct disconn_work *dwork = container_of(work, struct disconn_work, work); | |
3533 | struct i40iw_qp *iwqp = dwork->iwqp; | |
3534 | ||
3535 | kfree(dwork); | |
3536 | i40iw_cm_disconn_true(iwqp); | |
3537 | i40iw_rem_ref(&iwqp->ibqp); | |
3538 | } | |
3539 | ||
3540 | /** | |
3541 | * i40iw_accept - registered call for connection to be accepted | |
3542 | * @cm_id: cm information for passive connection | |
3543 | * @conn_param: accpet parameters | |
3544 | */ | |
int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct ib_qp *ibqp;
	struct i40iw_qp *iwqp;
	struct i40iw_device *iwdev;
	struct i40iw_sc_dev *dev;
	struct i40iw_cm_node *cm_node;
	struct ib_qp_attr attr;
	int passive_state;
	struct ib_mr *ibmr;
	struct i40iw_pd *iwpd;
	u16 buf_len = 0;
	struct i40iw_kmem_info accept;
	enum i40iw_status_code status;
	u64 tagged_offset;

	memset(&attr, 0, sizeof(attr));
	ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
	if (!ibqp)
		return -EINVAL;

	iwqp = to_iwqp(ibqp);
	iwdev = iwqp->iwdev;
	dev = &iwdev->sc_dev;
	/* the passive cm_node was stashed in provider_data at connect-request time */
	cm_node = (struct i40iw_cm_node *)cm_id->provider_data;

	/* derive address family and vlan from the cm_id's local address */
	if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
		cm_node->ipv4 = true;
		cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
	} else {
		cm_node->ipv4 = false;
		i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id, NULL);
	}
	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "Accept vlan_id=%d\n",
		    cm_node->vlan_id);
	/* the listener went away while this accept was pending */
	if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
		if (cm_node->loopbackpartner)
			i40iw_rem_ref_cm_node(cm_node->loopbackpartner);
		i40iw_rem_ref_cm_node(cm_node);
		return -EINVAL;
	}

	/* detect a reset racing with this accept */
	passive_state = atomic_add_return(1, &cm_node->passive_state);
	if (passive_state == I40IW_SEND_RESET_EVENT) {
		i40iw_rem_ref_cm_node(cm_node);
		return -ECONNRESET;
	}

	cm_node->cm_core->stats_accepts++;
	iwqp->cm_node = (void *)cm_node;
	cm_node->iwqp = iwqp;

	/* DMA buffer holds the IETF MPA reply frame plus caller private data */
	buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE;

	status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);

	if (status)
		return -ENOMEM;
	cm_node->pdata.size = conn_param->private_data_len;
	accept.addr = iwqp->ietf_mem.va;
	accept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
	/* append the private data right after the MPA frame */
	memcpy(accept.addr + accept.size, conn_param->private_data,
	       conn_param->private_data_len);

	/* setup our first outgoing iWarp send WQE (the IETF frame response) */
	if ((cm_node->ipv4 &&
	     !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
	    (!cm_node->ipv4 &&
	     !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) {
		/* non-loopback: register the frame buffer and post a real LSMM */
		iwpd = iwqp->iwpd;
		tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
		ibmr = i40iw_reg_phys_mr(&iwpd->ibpd,
					 iwqp->ietf_mem.pa,
					 buf_len,
					 IB_ACCESS_LOCAL_WRITE,
					 &tagged_offset);
		if (IS_ERR(ibmr)) {
			i40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem);
			return -ENOMEM;
		}

		ibmr->pd = &iwpd->ibpd;
		ibmr->device = iwpd->ibpd.device;
		iwqp->lsmm_mr = ibmr;
		if (iwqp->page)
			iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
		dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,
						  iwqp->ietf_mem.va,
						  (accept.size + conn_param->private_data_len),
						  ibmr->lkey);

	} else {
		/* loopback: post a zero-length LSMM, no MR needed */
		if (iwqp->page)
			iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
		dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
	}

	if (iwqp->page)
		kunmap(iwqp->page);

	/* bind the qp and cm_node to the cm_id */
	iwqp->cm_id = cm_id;
	cm_node->cm_id = cm_id;

	cm_id->provider_data = (void *)iwqp;
	iwqp->active_conn = 0;

	cm_node->lsmm_size = accept.size + conn_param->private_data_len;
	i40iw_cm_init_tsa_conn(iwqp, cm_node);
	cm_id->add_ref(cm_id);
	i40iw_add_ref(&iwqp->ibqp);

	i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);

	attr.qp_state = IB_QPS_RTS;
	cm_node->qhash_set = false;
	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
	if (cm_node->loopbackpartner) {
		cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;

		/* copy entire MPA frame to our cm_node's frame */
		memcpy(cm_node->loopbackpartner->pdata_buf,
		       conn_param->private_data,
		       conn_param->private_data_len);
		i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
	}

	cm_node->accelerated = 1;
	/* undo the pending-accept accounting on the listener */
	if (cm_node->accept_pend) {
		if (!cm_node->listener)
			i40iw_pr_err("cm_node->listener NULL for passive node\n");
		atomic_dec(&cm_node->listener->pend_accepts_cnt);
		cm_node->accept_pend = 0;
	}
	return 0;
}
3682 | ||
3683 | /** | |
3684 | * i40iw_reject - registered call for connection to be rejected | |
3685 | * @cm_id: cm information for passive connection | |
3686 | * @pdata: private data to be sent | |
3687 | * @pdata_len: private data length | |
3688 | */ | |
3689 | int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) | |
3690 | { | |
3691 | struct i40iw_device *iwdev; | |
3692 | struct i40iw_cm_node *cm_node; | |
3693 | struct i40iw_cm_node *loopback; | |
3694 | ||
3695 | cm_node = (struct i40iw_cm_node *)cm_id->provider_data; | |
3696 | loopback = cm_node->loopbackpartner; | |
3697 | cm_node->cm_id = cm_id; | |
3698 | cm_node->pdata.size = pdata_len; | |
3699 | ||
3700 | iwdev = to_iwdev(cm_id->device); | |
3701 | if (!iwdev) | |
3702 | return -EINVAL; | |
3703 | cm_node->cm_core->stats_rejects++; | |
3704 | ||
3705 | if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER) | |
3706 | return -EINVAL; | |
3707 | ||
3708 | if (loopback) { | |
3709 | memcpy(&loopback->pdata_buf, pdata, pdata_len); | |
3710 | loopback->pdata.size = pdata_len; | |
3711 | } | |
3712 | ||
3713 | return i40iw_cm_reject(cm_node, pdata, pdata_len); | |
3714 | } | |
3715 | ||
3716 | /** | |
3717 | * i40iw_connect - registered call for connection to be established | |
3718 | * @cm_id: cm information for passive connection | |
3719 | * @conn_param: Information about the connection | |
3720 | */ | |
3721 | int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |
3722 | { | |
3723 | struct ib_qp *ibqp; | |
3724 | struct i40iw_qp *iwqp; | |
3725 | struct i40iw_device *iwdev; | |
3726 | struct i40iw_cm_node *cm_node; | |
3727 | struct i40iw_cm_info cm_info; | |
3728 | struct sockaddr_in *laddr; | |
3729 | struct sockaddr_in *raddr; | |
3730 | struct sockaddr_in6 *laddr6; | |
3731 | struct sockaddr_in6 *raddr6; | |
ccea5f0f | 3732 | bool qhash_set = false; |
f27b4746 FL |
3733 | int apbvt_set = 0; |
3734 | enum i40iw_status_code status; | |
f27b4746 FL |
3735 | |
3736 | ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn); | |
3737 | if (!ibqp) | |
3738 | return -EINVAL; | |
3739 | iwqp = to_iwqp(ibqp); | |
3740 | if (!iwqp) | |
3741 | return -EINVAL; | |
3742 | iwdev = to_iwdev(iwqp->ibqp.device); | |
3743 | if (!iwdev) | |
3744 | return -EINVAL; | |
3745 | ||
8d8cd0bf FL |
3746 | laddr = (struct sockaddr_in *)&cm_id->m_local_addr; |
3747 | raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; | |
3748 | laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; | |
3749 | raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; | |
f27b4746 FL |
3750 | |
3751 | if (!(laddr->sin_port) || !(raddr->sin_port)) | |
3752 | return -EINVAL; | |
3753 | ||
3754 | iwqp->active_conn = 1; | |
3755 | iwqp->cm_id = NULL; | |
3756 | cm_id->provider_data = iwqp; | |
3757 | ||
3758 | /* set up the connection params for the node */ | |
3759 | if (cm_id->remote_addr.ss_family == AF_INET) { | |
3760 | cm_info.ipv4 = true; | |
3761 | memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr)); | |
3762 | memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr)); | |
3763 | cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr); | |
3764 | cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr); | |
3765 | cm_info.loc_port = ntohs(laddr->sin_port); | |
3766 | cm_info.rem_port = ntohs(raddr->sin_port); | |
3767 | cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr); | |
3768 | } else { | |
3769 | cm_info.ipv4 = false; | |
3770 | i40iw_copy_ip_ntohl(cm_info.loc_addr, | |
3771 | laddr6->sin6_addr.in6_u.u6_addr32); | |
3772 | i40iw_copy_ip_ntohl(cm_info.rem_addr, | |
3773 | raddr6->sin6_addr.in6_u.u6_addr32); | |
3774 | cm_info.loc_port = ntohs(laddr6->sin6_port); | |
3775 | cm_info.rem_port = ntohs(raddr6->sin6_port); | |
3776 | i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL); | |
3777 | } | |
f27b4746 | 3778 | cm_info.cm_id = cm_id; |
7eb2bde7 | 3779 | cm_info.tos = cm_id->tos; |
0fc2dc58 HO |
3780 | cm_info.user_pri = rt_tos2priority(cm_id->tos); |
3781 | i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n", | |
3782 | __func__, cm_id->tos, cm_info.user_pri); | |
f27b4746 FL |
3783 | if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) || |
3784 | (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32, | |
3785 | raddr6->sin6_addr.in6_u.u6_addr32, | |
3786 | sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) { | |
3787 | status = i40iw_manage_qhash(iwdev, | |
3788 | &cm_info, | |
3789 | I40IW_QHASH_TYPE_TCP_ESTABLISHED, | |
3790 | I40IW_QHASH_MANAGE_TYPE_ADD, | |
3791 | NULL, | |
3792 | true); | |
8d8cd0bf | 3793 | if (status) |
f27b4746 | 3794 | return -EINVAL; |
ccea5f0f | 3795 | qhash_set = true; |
f27b4746 | 3796 | } |
8d8cd0bf | 3797 | status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD); |
f27b4746 | 3798 | if (status) { |
f27b4746 FL |
3799 | i40iw_manage_qhash(iwdev, |
3800 | &cm_info, | |
3801 | I40IW_QHASH_TYPE_TCP_ESTABLISHED, | |
3802 | I40IW_QHASH_MANAGE_TYPE_DELETE, | |
3803 | NULL, | |
3804 | false); | |
3805 | return -EINVAL; | |
3806 | } | |
3807 | ||
3808 | apbvt_set = 1; | |
3809 | cm_id->add_ref(cm_id); | |
3810 | cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev, | |
3811 | conn_param->private_data_len, | |
3812 | (void *)conn_param->private_data, | |
3813 | &cm_info); | |
b3437e0d IM |
3814 | if (!cm_node) |
3815 | goto err; | |
f27b4746 FL |
3816 | |
3817 | i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); | |
3818 | if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO && | |
3819 | !cm_node->ord_size) | |
3820 | cm_node->ord_size = 1; | |
3821 | ||
3822 | cm_node->apbvt_set = apbvt_set; | |
ccea5f0f | 3823 | cm_node->qhash_set = qhash_set; |
f27b4746 FL |
3824 | iwqp->cm_node = cm_node; |
3825 | cm_node->iwqp = iwqp; | |
3826 | iwqp->cm_id = cm_id; | |
3827 | i40iw_add_ref(&iwqp->ibqp); | |
b3437e0d IM |
3828 | |
3829 | if (cm_node->state == I40IW_CM_STATE_SYN_SENT) { | |
3830 | if (i40iw_send_syn(cm_node, 0)) { | |
3831 | i40iw_rem_ref_cm_node(cm_node); | |
3832 | goto err; | |
3833 | } | |
3834 | } | |
3835 | ||
3836 | i40iw_debug(cm_node->dev, | |
3837 | I40IW_DEBUG_CM, | |
3838 | "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n", | |
3839 | cm_node->rem_port, | |
3840 | cm_node, | |
3841 | cm_node->cm_id); | |
f27b4746 | 3842 | return 0; |
b3437e0d IM |
3843 | |
3844 | err: | |
3845 | if (cm_node) { | |
3846 | if (cm_node->ipv4) | |
3847 | i40iw_debug(cm_node->dev, | |
3848 | I40IW_DEBUG_CM, | |
3849 | "Api - connect() FAILED: dest addr=%pI4", | |
3850 | cm_node->rem_addr); | |
3851 | else | |
3852 | i40iw_debug(cm_node->dev, I40IW_DEBUG_CM, | |
3853 | "Api - connect() FAILED: dest addr=%pI6", | |
3854 | cm_node->rem_addr); | |
3855 | } | |
3856 | i40iw_manage_qhash(iwdev, | |
3857 | &cm_info, | |
3858 | I40IW_QHASH_TYPE_TCP_ESTABLISHED, | |
3859 | I40IW_QHASH_MANAGE_TYPE_DELETE, | |
3860 | NULL, | |
3861 | false); | |
3862 | ||
3863 | if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core, | |
3864 | cm_info.loc_port)) | |
3865 | i40iw_manage_apbvt(iwdev, | |
3866 | cm_info.loc_port, | |
3867 | I40IW_MANAGE_APBVT_DEL); | |
3868 | cm_id->rem_ref(cm_id); | |
3869 | iwdev->cm_core.stats_connect_errs++; | |
3870 | return -ENOMEM; | |
f27b4746 FL |
3871 | } |
3872 | ||
3873 | /** | |
3874 | * i40iw_create_listen - registered call creating listener | |
3875 | * @cm_id: cm information for passive connection | |
3876 | * @backlog: to max accept pending count | |
3877 | */ | |
int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct i40iw_device *iwdev;
	struct i40iw_cm_listener *cm_listen_node;
	struct i40iw_cm_info cm_info;
	enum i40iw_status_code ret;
	struct sockaddr_in *laddr;
	struct sockaddr_in6 *laddr6;
	bool wildcard = false;

	iwdev = to_iwdev(cm_id->device);
	if (!iwdev)
		return -EINVAL;

	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	memset(&cm_info, 0, sizeof(cm_info));
	/* extract local address/port; a zero address means wildcard listen */
	if (laddr->sin_family == AF_INET) {
		cm_info.ipv4 = true;
		cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
		cm_info.loc_port = ntohs(laddr->sin_port);

		if (laddr->sin_addr.s_addr != INADDR_ANY)
			cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
		else
			wildcard = true;

	} else {
		cm_info.ipv4 = false;
		i40iw_copy_ip_ntohl(cm_info.loc_addr,
				    laddr6->sin6_addr.in6_u.u6_addr32);
		cm_info.loc_port = ntohs(laddr6->sin6_port);
		if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)
			i40iw_netdev_vlan_ipv6(cm_info.loc_addr,
					       &cm_info.vlan_id,
					       NULL);
		else
			wildcard = true;
	}
	cm_info.backlog = backlog;
	cm_info.cm_id = cm_id;

	/* may return an existing (reused) node for the same address/port */
	cm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info);
	if (!cm_listen_node) {
		i40iw_pr_err("cm_listen_node == NULL\n");
		return -ENOMEM;
	}

	cm_id->provider_data = cm_listen_node;

	cm_listen_node->tos = cm_id->tos;
	cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
	cm_info.user_pri = cm_listen_node->user_pri;

	/* only program hardware for a brand-new listener */
	if (!cm_listen_node->reused_node) {
		if (wildcard) {
			/* wildcard: add a child listener per local address */
			if (cm_info.ipv4)
				ret = i40iw_add_mqh_4(iwdev,
						      &cm_info,
						      cm_listen_node);
			else
				ret = i40iw_add_mqh_6(iwdev,
						      &cm_info,
						      cm_listen_node);
			if (ret)
				goto error;

			ret = i40iw_manage_apbvt(iwdev,
						 cm_info.loc_port,
						 I40IW_MANAGE_APBVT_ADD);

			if (ret)
				goto error;
		} else {
			/* specific address: one SYN qhash entry plus apbvt */
			ret = i40iw_manage_qhash(iwdev,
						 &cm_info,
						 I40IW_QHASH_TYPE_TCP_SYN,
						 I40IW_QHASH_MANAGE_TYPE_ADD,
						 NULL,
						 true);
			if (ret)
				goto error;
			cm_listen_node->qhash_set = true;
			ret = i40iw_manage_apbvt(iwdev,
						 cm_info.loc_port,
						 I40IW_MANAGE_APBVT_ADD);
			if (ret)
				goto error;
		}
	}
	cm_id->add_ref(cm_id);
	cm_listen_node->cm_core->stats_listen_created++;
	return 0;
error:
	/* tears down any child listeners/qhash entries added above */
	i40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false);
	return -EINVAL;
}
3975 | ||
3976 | /** | |
3977 | * i40iw_destroy_listen - registered call to destroy listener | |
3978 | * @cm_id: cm information for passive connection | |
3979 | */ | |
3980 | int i40iw_destroy_listen(struct iw_cm_id *cm_id) | |
3981 | { | |
3982 | struct i40iw_device *iwdev; | |
3983 | ||
3984 | iwdev = to_iwdev(cm_id->device); | |
3985 | if (cm_id->provider_data) | |
3986 | i40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true); | |
3987 | else | |
3988 | i40iw_pr_err("cm_id->provider_data was NULL\n"); | |
3989 | ||
3990 | cm_id->rem_ref(cm_id); | |
3991 | ||
3992 | return 0; | |
3993 | } | |
3994 | ||
3995 | /** | |
3996 | * i40iw_cm_event_connected - handle connected active node | |
3997 | * @event: the info for cm_node of connection | |
3998 | */ | |
static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
{
	struct i40iw_qp *iwqp;
	struct i40iw_device *iwdev;
	struct i40iw_cm_node *cm_node;
	struct i40iw_sc_dev *dev;
	struct ib_qp_attr attr;
	struct iw_cm_id *cm_id;
	int status;
	bool read0;

	cm_node = event->cm_node;
	cm_id = cm_node->cm_id;
	iwqp = (struct i40iw_qp *)cm_id->provider_data;
	iwdev = to_iwdev(iwqp->ibqp.device);
	dev = &iwdev->sc_dev;

	/* qp was torn down while the event was in flight */
	if (iwqp->destroyed) {
		status = -ETIMEDOUT;
		goto error;
	}
	i40iw_cm_init_tsa_conn(iwqp, cm_node);
	/* first op is an RDMA-read-zero when negotiated, else a zero send */
	read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
	if (iwqp->page)
		iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
	dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
	if (iwqp->page)
		kunmap(iwqp->page);
	status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
	if (status)
		i40iw_pr_err("send cm event\n");

	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_RTS;
	cm_node->qhash_set = false;
	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);

	cm_node->accelerated = 1;
	/* undo the pending-accept accounting on the listener */
	if (cm_node->accept_pend) {
		if (!cm_node->listener)
			i40iw_pr_err("listener is null for passive node\n");
		atomic_dec(&cm_node->listener->pend_accepts_cnt);
		cm_node->accept_pend = 0;
	}
	return;

error:
	/* report the failure upstream and drop the connection references */
	iwqp->cm_id = NULL;
	cm_id->provider_data = NULL;
	i40iw_send_cm_event(event->cm_node,
			    cm_id,
			    IW_CM_EVENT_CONNECT_REPLY,
			    status);
	cm_id->rem_ref(cm_id);
	i40iw_rem_ref_cm_node(event->cm_node);
}
4055 | ||
4056 | /** | |
4057 | * i40iw_cm_event_reset - handle reset | |
4058 | * @event: the info for cm_node of connection | |
4059 | */ | |
4060 | static void i40iw_cm_event_reset(struct i40iw_cm_event *event) | |
4061 | { | |
4062 | struct i40iw_cm_node *cm_node = event->cm_node; | |
4063 | struct iw_cm_id *cm_id = cm_node->cm_id; | |
4064 | struct i40iw_qp *iwqp; | |
4065 | ||
4066 | if (!cm_id) | |
4067 | return; | |
4068 | ||
4069 | iwqp = cm_id->provider_data; | |
4070 | if (!iwqp) | |
4071 | return; | |
4072 | ||
4073 | i40iw_debug(cm_node->dev, | |
4074 | I40IW_DEBUG_CM, | |
4075 | "reset event %p - cm_id = %p\n", | |
4076 | event->cm_node, cm_id); | |
4077 | iwqp->cm_id = NULL; | |
4078 | ||
4079 | i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET); | |
4080 | i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0); | |
4081 | } | |
4082 | ||
4083 | /** | |
4084 | * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer | |
4085 | * @work: pointer of cm event info. | |
4086 | */ | |
static void i40iw_cm_event_handler(struct work_struct *work)
{
	struct i40iw_cm_event *event;
	struct i40iw_cm_node *cm_node;

	event = container_of(work, struct i40iw_cm_event, event_work);
	if (!event || !event->cm_node || !event->cm_node->cm_core)
		return;

	cm_node = event->cm_node;

	switch (event->type) {
	case I40IW_CM_EVENT_MPA_REQ:
		i40iw_send_cm_event(cm_node, cm_node->cm_id,
				    IW_CM_EVENT_CONNECT_REQUEST, 0);
		break;
	case I40IW_CM_EVENT_RESET:
		i40iw_cm_event_reset(event);
		break;
	case I40IW_CM_EVENT_CONNECTED:
		/* only meaningful once the connection is offloaded */
		if (cm_node->cm_id &&
		    (cm_node->state == I40IW_CM_STATE_OFFLOADED))
			i40iw_cm_event_connected(event);
		break;
	case I40IW_CM_EVENT_MPA_REJECT:
		if (cm_node->cm_id &&
		    (cm_node->state != I40IW_CM_STATE_OFFLOADED))
			i40iw_send_cm_event(cm_node, cm_node->cm_id,
					    IW_CM_EVENT_CONNECT_REPLY,
					    -ECONNREFUSED);
		break;
	case I40IW_CM_EVENT_ABORTED:
		if (cm_node->cm_id &&
		    (cm_node->state != I40IW_CM_STATE_OFFLOADED))
			i40iw_event_connect_error(event);
		break;
	default:
		i40iw_pr_err("event type = %d\n", event->type);
		break;
	}

	/* release the references taken in i40iw_cm_post_event() */
	event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
	i40iw_rem_ref_cm_node(event->cm_node);
	kfree(event);
}
4139 | ||
4140 | /** | |
4141 | * i40iw_cm_post_event - queue event request for worker thread | |
4142 | * @event: cm node's info for up event call | |
4143 | */ | |
4144 | static void i40iw_cm_post_event(struct i40iw_cm_event *event) | |
4145 | { | |
4146 | atomic_inc(&event->cm_node->ref_count); | |
4147 | event->cm_info.cm_id->add_ref(event->cm_info.cm_id); | |
4148 | INIT_WORK(&event->event_work, i40iw_cm_event_handler); | |
4149 | ||
4150 | queue_work(event->cm_node->cm_core->event_wq, &event->event_work); | |
4151 | } | |
d5965934 | 4152 | |
e5e74b61 MI |
4153 | /** |
4154 | * i40iw_qhash_ctrl - enable/disable qhash for list | |
4155 | * @iwdev: device pointer | |
4156 | * @parent_listen_node: parent listen node | |
4157 | * @nfo: cm info node | |
4158 | * @ipaddr: Pointer to IPv4 or IPv6 address | |
4159 | * @ipv4: flag indicating IPv4 when true | |
4160 | * @ifup: flag indicating interface up when true | |
4161 | * | |
4162 | * Enables or disables the qhash for the node in the child | |
4163 | * listen list that matches ipaddr. If no matching IP was found | |
4164 | * it will allocate and add a new child listen node to the | |
4165 | * parent listen node. The listen_list_lock is assumed to be | |
4166 | * held when called. | |
4167 | */ | |
static void i40iw_qhash_ctrl(struct i40iw_device *iwdev,
			     struct i40iw_cm_listener *parent_listen_node,
			     struct i40iw_cm_info *nfo,
			     u32 *ipaddr, bool ipv4, bool ifup)
{
	struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
	struct i40iw_cm_listener *child_listen_node;
	struct list_head *pos, *tpos;
	enum i40iw_status_code ret;
	bool node_allocated = false;
	/* interface up -> add the quad hash entry, down -> delete it */
	enum i40iw_quad_hash_manage_type op =
		ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;

	/* look for an existing child listener that matches ipaddr */
	list_for_each_safe(pos, tpos, child_listen_list) {
		child_listen_node =
			list_entry(pos,
				   struct i40iw_cm_listener,
				   child_listen_list);
		if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
			goto set_qhash;
	}

	/* if not found then add a child listener if interface is going up */
	if (!ifup)
		return;
	/* GFP_ATOMIC: caller holds listen_list_lock */
	child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
	if (!child_listen_node)
		return;
	node_allocated = true;
	/* clone the parent's listener state for the new child.
	 * NOTE(review): this also copies the parent's embedded list linkage;
	 * it is only repaired by the list_add below on success — confirm the
	 * failure path cannot leave stale pointers.
	 */
	memcpy(child_listen_node, parent_listen_node, sizeof(*child_listen_node));

	memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);

set_qhash:
	/* program the quad hash using the child's address and vlan */
	memcpy(nfo->loc_addr,
	       child_listen_node->loc_addr,
	       sizeof(nfo->loc_addr));
	nfo->vlan_id = child_listen_node->vlan_id;
	ret = i40iw_manage_qhash(iwdev, nfo,
				 I40IW_QHASH_TYPE_TCP_SYN,
				 op,
				 NULL, false);
	if (!ret) {
		child_listen_node->qhash_set = ifup;
		/* only link a freshly allocated node into the parent's list */
		if (node_allocated)
			list_add(&child_listen_node->child_listen_list,
				 &parent_listen_node->child_listen_list);
	} else if (node_allocated) {
		/* qhash programming failed; discard the node we just built */
		kfree(child_listen_node);
	}
}
4219 | ||
d5965934 MI |
4220 | /** |
4221 | * i40iw_cm_disconnect_all - disconnect all connected qp's | |
4222 | * @iwdev: device pointer | |
4223 | */ | |
4224 | void i40iw_cm_disconnect_all(struct i40iw_device *iwdev) | |
4225 | { | |
4226 | struct i40iw_cm_core *cm_core = &iwdev->cm_core; | |
4227 | struct list_head *list_core_temp; | |
4228 | struct list_head *list_node; | |
4229 | struct i40iw_cm_node *cm_node; | |
4230 | unsigned long flags; | |
4231 | struct list_head connected_list; | |
4232 | struct ib_qp_attr attr; | |
4233 | ||
4234 | INIT_LIST_HEAD(&connected_list); | |
4235 | spin_lock_irqsave(&cm_core->ht_lock, flags); | |
4236 | list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) { | |
4237 | cm_node = container_of(list_node, struct i40iw_cm_node, list); | |
4238 | atomic_inc(&cm_node->ref_count); | |
4239 | list_add(&cm_node->connected_entry, &connected_list); | |
4240 | } | |
4241 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | |
4242 | ||
4243 | list_for_each_safe(list_node, list_core_temp, &connected_list) { | |
4244 | cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry); | |
4245 | attr.qp_state = IB_QPS_ERR; | |
4246 | i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL); | |
4247 | i40iw_rem_ref_cm_node(cm_node); | |
4248 | } | |
4249 | } | |
e5e74b61 MI |
4250 | |
4251 | /** | |
 * i40iw_if_notify - process an ifdown/ifup notification on an interface
 * @iwdev: device pointer
 * @netdev: network device whose address state changed
 * @ipaddr: Pointer to IPv4 or IPv6 address
 * @ipv4: flag indicating IPv4 when true
 * @ifup: flag indicating interface up when true
4257 | */ | |
void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
		     u32 *ipaddr, bool ipv4, bool ifup)
{
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	unsigned long flags;
	struct i40iw_cm_listener *listen_node;
	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
	struct i40iw_cm_info nfo;
	u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
	enum i40iw_status_code ret;
	/* interface up -> add qhash entries, down -> delete them */
	enum i40iw_quad_hash_manage_type op =
		ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;

	/* Disable or enable qhash for listeners */
	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
	list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
		/* match on vlan plus either the exact address or a
		 * wildcard (all-zero) listener address
		 */
		if (vlan_id == listen_node->vlan_id &&
		    (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) ||
		     !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) {
			memcpy(nfo.loc_addr, listen_node->loc_addr,
			       sizeof(nfo.loc_addr));
			nfo.loc_port = listen_node->loc_port;
			nfo.ipv4 = listen_node->ipv4;
			nfo.vlan_id = listen_node->vlan_id;
			nfo.user_pri = listen_node->user_pri;
			if (!list_empty(&listen_node->child_listen_list)) {
				/* wildcard listener: toggle matching child */
				i40iw_qhash_ctrl(iwdev,
						 listen_node,
						 &nfo,
						 ipaddr, ipv4, ifup);
			} else if (memcmp(listen_node->loc_addr, ip_zero,
					  ipv4 ? 4 : 16)) {
				/* specific-address listener: toggle its own entry */
				ret = i40iw_manage_qhash(iwdev,
							 &nfo,
							 I40IW_QHASH_TYPE_TCP_SYN,
							 op,
							 NULL,
							 false);
				if (!ret)
					listen_node->qhash_set = ifup;
			}
		}
	}
	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

	/* disconnect any connected qp's on ifdown */
	if (!ifup)
		i40iw_cm_disconnect_all(iwdev);
}