1 /*******************************************************************************
2 *
3 * Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenFabrics.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 *******************************************************************************/
34
35 #include <linux/atomic.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/init.h>
39 #include <linux/if_arp.h>
40 #include <linux/if_vlan.h>
41 #include <linux/notifier.h>
42 #include <linux/net.h>
43 #include <linux/types.h>
44 #include <linux/timer.h>
45 #include <linux/time.h>
46 #include <linux/delay.h>
47 #include <linux/etherdevice.h>
48 #include <linux/netdevice.h>
49 #include <linux/random.h>
50 #include <linux/list.h>
51 #include <linux/threads.h>
52 #include <linux/highmem.h>
53 #include <net/arp.h>
54 #include <net/ndisc.h>
55 #include <net/neighbour.h>
56 #include <net/route.h>
57 #include <net/addrconf.h>
58 #include <net/ip6_route.h>
59 #include <net/ip_fib.h>
60 #include <net/tcp.h>
61 #include <asm/checksum.h>
62
63 #include "i40iw.h"
64
65 static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *);
66 static void i40iw_cm_post_event(struct i40iw_cm_event *event);
67 static void i40iw_disconnect_worker(struct work_struct *work);
68
69 /**
70 * i40iw_free_sqbuf - put back puda buffer if refcount = 0
71 * @vsi: pointer to vsi structure
72 * @bufp: puda buffer to free
73 */
74 void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
75 {
76 struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
77 struct i40iw_puda_rsrc *ilq = vsi->ilq;
78
79 if (!atomic_dec_return(&buf->refcount))
80 i40iw_puda_ret_bufpool(ilq, buf);
81 }
82
83 /**
84 * i40iw_derive_hw_ird_setting - Calculate IRD
85 *
86 * @cm_ird: IRD of connection's node
87 *
88 * The ird from the connection is rounded to a supported HW
89 * setting (2, 8, 32, 64) and then encoded for the ird_size field of
90 * qp_ctx; e.g. a requested IRD of 10 rounds up to the HW setting 32, encoded as 2
91 */
92 static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
93 {
94 u8 encoded_ird_size;
95 u8 pof2_cm_ird = 1;
96
97 /* round up to the next power of 2 */
98 while (pof2_cm_ird < cm_ird)
99 pof2_cm_ird *= 2;
100
101 /* ird_size field is encoded in qp_ctx */
102 switch (pof2_cm_ird) {
103 case I40IW_HW_IRD_SETTING_64:
104 encoded_ird_size = 3;
105 break;
106 case I40IW_HW_IRD_SETTING_32:
107 case I40IW_HW_IRD_SETTING_16:
108 encoded_ird_size = 2;
109 break;
110 case I40IW_HW_IRD_SETTING_8:
111 case I40IW_HW_IRD_SETTING_4:
112 encoded_ird_size = 1;
113 break;
114 case I40IW_HW_IRD_SETTING_2:
115 default:
116 encoded_ird_size = 0;
117 break;
118 }
119 return encoded_ird_size;
120 }
121
122 /**
123 * i40iw_record_ird_ord - Record IRD/ORD passed in
124 * @cm_node: connection's node
125 * @conn_ird: connection IRD
126 * @conn_ord: connection ORD
127 */
128 static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord)
129 {
130 if (conn_ird > I40IW_MAX_IRD_SIZE)
131 conn_ird = I40IW_MAX_IRD_SIZE;
132
133 if (conn_ord > I40IW_MAX_ORD_SIZE)
134 conn_ord = I40IW_MAX_ORD_SIZE;
135
136 cm_node->ird_size = conn_ird;
137 cm_node->ord_size = conn_ord;
138 }
139
140 /**
141 * i40iw_copy_ip_ntohl - copy IP address from network to host order
142 * @dst: IP address in host order
143 * @src: IP address in network (big endian) order
144 */
145 void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src)
146 {
147 *dst++ = ntohl(*src++);
148 *dst++ = ntohl(*src++);
149 *dst++ = ntohl(*src++);
150 *dst = ntohl(*src);
151 }
152
153 /**
154 * i40iw_copy_ip_htonl - copy IP address from host to network order
155 * @dst: IP address in network (big endian) order
156 * @src: IP address in host order
157 */
158 static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src)
159 {
160 *dst++ = htonl(*src++);
161 *dst++ = htonl(*src++);
162 *dst++ = htonl(*src++);
163 *dst = htonl(*src);
164 }
165
166 /**
167 * i40iw_fill_sockaddr4 - get addr info for passive connection
168 * @cm_node: connection's node
169 * @event: upper layer's cm event
170 */
171 static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node,
172 struct iw_cm_event *event)
173 {
174 struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
175 struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
176
177 laddr->sin_family = AF_INET;
178 raddr->sin_family = AF_INET;
179
180 laddr->sin_port = htons(cm_node->loc_port);
181 raddr->sin_port = htons(cm_node->rem_port);
182
183 laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
184 raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
185 }
186
187 /**
188 * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side
189 * @cm_node: connection's node
190 * @event: upper layer's cm event
191 */
192 static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node,
193 struct iw_cm_event *event)
194 {
195 struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
196 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;
197
198 laddr6->sin6_family = AF_INET6;
199 raddr6->sin6_family = AF_INET6;
200
201 laddr6->sin6_port = htons(cm_node->loc_port);
202 raddr6->sin6_port = htons(cm_node->rem_port);
203
204 i40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
205 cm_node->loc_addr);
206 i40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
207 cm_node->rem_addr);
208 }
209
210 /**
211 * i40iw_get_addr_info
212 * @cm_node: contains ip/tcp info
213 * @cm_info: to get a copy of the cm_node ip/tcp info
214 */
215 static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
216 struct i40iw_cm_info *cm_info)
217 {
218 cm_info->ipv4 = cm_node->ipv4;
219 cm_info->vlan_id = cm_node->vlan_id;
220 memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
221 memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
222 cm_info->loc_port = cm_node->loc_port;
223 cm_info->rem_port = cm_node->rem_port;
224 cm_info->user_pri = cm_node->user_pri;
225 }
226
227 /**
228 * i40iw_get_cmevent_info - for cm event upcall
229 * @cm_node: connection's node
230 * @cm_id: upper layers cm struct for the event
231 * @event: upper layer's cm event
232 */
233 static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node,
234 struct iw_cm_id *cm_id,
235 struct iw_cm_event *event)
236 {
237 memcpy(&event->local_addr, &cm_id->m_local_addr,
238 sizeof(event->local_addr));
239 memcpy(&event->remote_addr, &cm_id->m_remote_addr,
240 sizeof(event->remote_addr));
241 if (cm_node) {
242 event->private_data = (void *)cm_node->pdata_buf;
243 event->private_data_len = (u8)cm_node->pdata.size;
244 event->ird = cm_node->ird_size;
245 event->ord = cm_node->ord_size;
246 }
247 }
248
249 /**
250 * i40iw_send_cm_event - upcall cm's event handler
251 * @cm_node: connection's node
252 * @cm_id: upper layer's cm info struct
253 * @type: Event type to indicate
254 * @status: status for the event type
255 */
256 static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
257 struct iw_cm_id *cm_id,
258 enum iw_cm_event_type type,
259 int status)
260 {
261 struct iw_cm_event event;
262
263 memset(&event, 0, sizeof(event));
264 event.event = type;
265 event.status = status;
266 switch (type) {
267 case IW_CM_EVENT_CONNECT_REQUEST:
268 if (cm_node->ipv4)
269 i40iw_fill_sockaddr4(cm_node, &event);
270 else
271 i40iw_fill_sockaddr6(cm_node, &event);
272 event.provider_data = (void *)cm_node;
273 event.private_data = (void *)cm_node->pdata_buf;
274 event.private_data_len = (u8)cm_node->pdata.size;
275 event.ird = cm_node->ird_size;
276 break;
277 case IW_CM_EVENT_CONNECT_REPLY:
278 i40iw_get_cmevent_info(cm_node, cm_id, &event);
279 break;
280 case IW_CM_EVENT_ESTABLISHED:
281 event.ird = cm_node->ird_size;
282 event.ord = cm_node->ord_size;
283 break;
284 case IW_CM_EVENT_DISCONNECT:
285 break;
286 case IW_CM_EVENT_CLOSE:
287 break;
288 default:
289 i40iw_pr_err("event type received type = %d\n", type);
290 return -1;
291 }
292 return cm_id->event_handler(cm_id, &event);
293 }
294
295 /**
296 * i40iw_create_event - create cm event
297 * @cm_node: connection's node
298 * @type: Event type to generate
299 */
300 static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
301 enum i40iw_cm_event_type type)
302 {
303 struct i40iw_cm_event *event;
304
305 if (!cm_node->cm_id)
306 return NULL;
307
308 event = kzalloc(sizeof(*event), GFP_ATOMIC);
309
310 if (!event)
311 return NULL;
312
313 event->type = type;
314 event->cm_node = cm_node;
315 memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr));
316 memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr));
317 event->cm_info.rem_port = cm_node->rem_port;
318 event->cm_info.loc_port = cm_node->loc_port;
319 event->cm_info.cm_id = cm_node->cm_id;
320
321 i40iw_debug(cm_node->dev,
322 I40IW_DEBUG_CM,
323 "node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
324 cm_node,
325 event,
326 type,
327 event->cm_info.loc_addr,
328 event->cm_info.rem_addr);
329
330 i40iw_cm_post_event(event);
331 return event;
332 }
333
334 /**
335 * i40iw_free_retrans_entry - free send entry
336 * @cm_node: connection's node
337 */
338 static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
339 {
340 struct i40iw_device *iwdev = cm_node->iwdev;
341 struct i40iw_timer_entry *send_entry;
342
343 send_entry = cm_node->send_entry;
344 if (send_entry) {
345 cm_node->send_entry = NULL;
346 i40iw_free_sqbuf(&iwdev->vsi, (void *)send_entry->sqbuf);
347 kfree(send_entry);
348 atomic_dec(&cm_node->ref_count);
349 }
350 }
351
352 /**
353 * i40iw_cleanup_retrans_entry - free send entry with lock
354 * @cm_node: connection's node
355 */
356 static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
357 {
358 unsigned long flags;
359
360 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
361 i40iw_free_retrans_entry(cm_node);
362 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
363 }
364
365 /**
366 * i40iw_form_cm_frame - get a free packet and build frame
367 * @cm_node: connection's node info to use in frame
368 * @options: pointer to options info
369 * @hdr: pointer to mpa header
370 * @pdata: pointer to private data
371 * @flags: indicates SYN, ACK, FIN and/or RST bits to set
372 */
373 static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
374 struct i40iw_kmem_info *options,
375 struct i40iw_kmem_info *hdr,
376 struct i40iw_kmem_info *pdata,
377 u8 flags)
378 {
379 struct i40iw_puda_buf *sqbuf;
380 struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
381 u8 *buf;
382
383 struct tcphdr *tcph;
384 struct iphdr *iph;
385 struct ipv6hdr *ip6h;
386 struct ethhdr *ethh;
387 u16 packetsize;
388 u16 eth_hlen = ETH_HLEN;
389 u32 opts_len = 0;
390 u32 pd_len = 0;
391 u32 hdr_len = 0;
392 u16 vtag;
393
394 sqbuf = i40iw_puda_get_bufpool(vsi->ilq);
395 if (!sqbuf)
396 return NULL;
397 buf = sqbuf->mem.va;
398
399 if (options)
400 opts_len = (u32)options->size;
401
402 if (hdr)
403 hdr_len = hdr->size;
404
405 if (pdata)
406 pd_len = pdata->size;
407
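	/* a vlan_id below VLAN_TAG_PRESENT is valid; the 802.1Q tag adds 4 bytes to the Ethernet header */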
408 if (cm_node->vlan_id < VLAN_TAG_PRESENT)
409 eth_hlen += 4;
410
411 if (cm_node->ipv4)
412 packetsize = sizeof(*iph) + sizeof(*tcph);
413 else
414 packetsize = sizeof(*ip6h) + sizeof(*tcph);
415 packetsize += opts_len + hdr_len + pd_len;
416
417 memset(buf, 0x00, eth_hlen + packetsize);
418
419 sqbuf->totallen = packetsize + eth_hlen;
420 sqbuf->maclen = eth_hlen;
421 sqbuf->tcphlen = sizeof(*tcph) + opts_len;
422 sqbuf->scratch = (void *)cm_node;
423
424 ethh = (struct ethhdr *)buf;
425 buf += eth_hlen;
426
427 if (cm_node->ipv4) {
428 sqbuf->ipv4 = true;
429
430 iph = (struct iphdr *)buf;
431 buf += sizeof(*iph);
432 tcph = (struct tcphdr *)buf;
433 buf += sizeof(*tcph);
434
435 ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
436 ether_addr_copy(ethh->h_source, cm_node->loc_mac);
437 if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
438 ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
439 vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
440 ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
441
442 ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
443 } else {
444 ethh->h_proto = htons(ETH_P_IP);
445 }
446
447 iph->version = IPVERSION;
448 iph->ihl = 5; /* 5 * 4-byte words, IP header len */
449 iph->tos = cm_node->tos;
450 iph->tot_len = htons(packetsize);
451 iph->id = htons(++cm_node->tcp_cntxt.loc_id);
452
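	/* 0x4000 sets the Don't Fragment (DF) flag with a zero fragment offset */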
453 iph->frag_off = htons(0x4000);
454 iph->ttl = 0x40;
455 iph->protocol = IPPROTO_TCP;
456 iph->saddr = htonl(cm_node->loc_addr[0]);
457 iph->daddr = htonl(cm_node->rem_addr[0]);
458 } else {
459 sqbuf->ipv4 = false;
460 ip6h = (struct ipv6hdr *)buf;
461 buf += sizeof(*ip6h);
462 tcph = (struct tcphdr *)buf;
463 buf += sizeof(*tcph);
464
465 ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
466 ether_addr_copy(ethh->h_source, cm_node->loc_mac);
467 if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
468 ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
469 vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
470 ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
471 ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
472 } else {
473 ethh->h_proto = htons(ETH_P_IPV6);
474 }
475 ip6h->version = 6;
476 ip6h->priority = cm_node->tos >> 4;
477 ip6h->flow_lbl[0] = cm_node->tos << 4;
478 ip6h->flow_lbl[1] = 0;
479 ip6h->flow_lbl[2] = 0;
480 ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
481 ip6h->nexthdr = 6; /* next header is TCP (IPPROTO_TCP) */
482 ip6h->hop_limit = 128;
483 i40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
484 cm_node->loc_addr);
485 i40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
486 cm_node->rem_addr);
487 }
488
489 tcph->source = htons(cm_node->loc_port);
490 tcph->dest = htons(cm_node->rem_port);
491
492 tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
493
494 if (flags & SET_ACK) {
495 cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
496 tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
497 tcph->ack = 1;
498 } else {
499 tcph->ack_seq = 0;
500 }
501
502 if (flags & SET_SYN) {
503 cm_node->tcp_cntxt.loc_seq_num++;
504 tcph->syn = 1;
505 } else {
506 cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
507 }
508
509 if (flags & SET_FIN) {
510 cm_node->tcp_cntxt.loc_seq_num++;
511 tcph->fin = 1;
512 }
513
514 if (flags & SET_RST)
515 tcph->rst = 1;
516
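	/* tcp data offset is in 32-bit words, so round header plus options up to a multiple of 4 */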
517 tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
518 sqbuf->tcphlen = tcph->doff << 2;
519 tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
520 tcph->urg_ptr = 0;
521
522 if (opts_len) {
523 memcpy(buf, options->addr, opts_len);
524 buf += opts_len;
525 }
526
527 if (hdr_len) {
528 memcpy(buf, hdr->addr, hdr_len);
529 buf += hdr_len;
530 }
531
532 if (pdata && pdata->addr)
533 memcpy(buf, pdata->addr, pdata->size);
534
535 atomic_set(&sqbuf->refcount, 1);
536
537 return sqbuf;
538 }
539
540 /**
541 * i40iw_send_reset - Send RST packet
542 * @cm_node: connection's node
543 */
544 static int i40iw_send_reset(struct i40iw_cm_node *cm_node)
545 {
546 struct i40iw_puda_buf *sqbuf;
547 int flags = SET_RST | SET_ACK;
548
549 sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags);
550 if (!sqbuf) {
551 i40iw_pr_err("no sqbuf\n");
552 return -1;
553 }
554
555 return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1);
556 }
557
558 /**
559 * i40iw_active_open_err - send event for active side cm error
560 * @cm_node: connection's node
561 * @reset: Flag to send reset or not
562 */
563 static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset)
564 {
565 i40iw_cleanup_retrans_entry(cm_node);
566 cm_node->cm_core->stats_connect_errs++;
567 if (reset) {
568 i40iw_debug(cm_node->dev,
569 I40IW_DEBUG_CM,
570 "%s cm_node=%p state=%d\n",
571 __func__,
572 cm_node,
573 cm_node->state);
574 atomic_inc(&cm_node->ref_count);
575 i40iw_send_reset(cm_node);
576 }
577
578 cm_node->state = I40IW_CM_STATE_CLOSED;
579 i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
580 }
581
582 /**
583 * i40iw_passive_open_err - handle passive side cm error
584 * @cm_node: connection's node
585 * @reset: send reset or just free cm_node
586 */
587 static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, bool reset)
588 {
589 i40iw_cleanup_retrans_entry(cm_node);
590 cm_node->cm_core->stats_passive_errs++;
591 cm_node->state = I40IW_CM_STATE_CLOSED;
592 i40iw_debug(cm_node->dev,
593 I40IW_DEBUG_CM,
594 "%s cm_node=%p state =%d\n",
595 __func__,
596 cm_node,
597 cm_node->state);
598 if (reset)
599 i40iw_send_reset(cm_node);
600 else
601 i40iw_rem_ref_cm_node(cm_node);
602 }
603
604 /**
605 * i40iw_event_connect_error - to create connect error event
606 * @event: cm information for connect event
607 */
608 static void i40iw_event_connect_error(struct i40iw_cm_event *event)
609 {
610 struct i40iw_qp *iwqp;
611 struct iw_cm_id *cm_id;
612
613 cm_id = event->cm_node->cm_id;
614 if (!cm_id)
615 return;
616
617 iwqp = cm_id->provider_data;
618
619 if (!iwqp || !iwqp->iwdev)
620 return;
621
622 iwqp->cm_id = NULL;
623 cm_id->provider_data = NULL;
624 i40iw_send_cm_event(event->cm_node, cm_id,
625 IW_CM_EVENT_CONNECT_REPLY,
626 -ECONNRESET);
627 cm_id->rem_ref(cm_id);
628 i40iw_rem_ref_cm_node(event->cm_node);
629 }
630
631 /**
632 * i40iw_process_options
633 * @cm_node: connection's node
634 * @optionsloc: point to start of options
635 * @optionsize: size of all options
636 * @syn_packet: flag if syn packet
637 */
638 static int i40iw_process_options(struct i40iw_cm_node *cm_node,
639 u8 *optionsloc,
640 u32 optionsize,
641 u32 syn_packet)
642 {
643 u32 tmp;
644 u32 offset = 0;
645 union all_known_options *all_options;
646 char got_mss_option = 0;
647
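	/* walk the TCP option list; only MSS and window scale are acted on */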
648 while (offset < optionsize) {
649 all_options = (union all_known_options *)(optionsloc + offset);
650 switch (all_options->as_base.optionnum) {
651 case OPTION_NUMBER_END:
652 offset = optionsize;
653 break;
654 case OPTION_NUMBER_NONE:
655 offset += 1;
656 continue;
657 case OPTION_NUMBER_MSS:
658 i40iw_debug(cm_node->dev,
659 I40IW_DEBUG_CM,
660 "%s: MSS Length: %d Offset: %d Size: %d\n",
661 __func__,
662 all_options->as_mss.length,
663 offset,
664 optionsize);
665 got_mss_option = 1;
666 if (all_options->as_mss.length != 4)
667 return -1;
668 tmp = ntohs(all_options->as_mss.mss);
669 if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
670 cm_node->tcp_cntxt.mss = tmp;
671 break;
672 case OPTION_NUMBER_WINDOW_SCALE:
673 cm_node->tcp_cntxt.snd_wscale =
674 all_options->as_windowscale.shiftcount;
675 break;
676 default:
677 i40iw_debug(cm_node->dev,
678 I40IW_DEBUG_CM,
679 "TCP Option not understood: %x\n",
680 all_options->as_base.optionnum);
681 break;
682 }
683 offset += all_options->as_base.length;
684 }
685 if (!got_mss_option && syn_packet)
686 cm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS;
687 return 0;
688 }
689
690 /**
691 * i40iw_handle_tcp_options - process received TCP options
692 * @cm_node: connection's node
693 * @tcph: pointer tcp header
694 * @optionsize: size of options rcvd
695 * @passive: active or passive flag
696 */
697 static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,
698 struct tcphdr *tcph,
699 int optionsize,
700 int passive)
701 {
702 u8 *optionsloc = (u8 *)&tcph[1];
703
704 if (optionsize) {
705 if (i40iw_process_options(cm_node,
706 optionsloc,
707 optionsize,
708 (u32)tcph->syn)) {
709 i40iw_debug(cm_node->dev,
710 I40IW_DEBUG_CM,
711 "%s: Node %p, Sending RESET\n",
712 __func__,
713 cm_node);
714 if (passive)
715 i40iw_passive_open_err(cm_node, true);
716 else
717 i40iw_active_open_err(cm_node, true);
718 return -1;
719 }
720 }
721
722 cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
723 cm_node->tcp_cntxt.snd_wscale;
724
725 if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
726 cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
727 return 0;
728 }
729
730 /**
731 * i40iw_build_mpa_v1 - build a MPA V1 frame
732 * @cm_node: connection's node
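 * @start_addr: buffer start address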
733 * @mpa_key: to do read0 or write0
734 */
735 static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,
736 void *start_addr,
737 u8 mpa_key)
738 {
739 struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;
740
741 switch (mpa_key) {
742 case MPA_KEY_REQUEST:
743 memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
744 break;
745 case MPA_KEY_REPLY:
746 memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
747 break;
748 default:
749 break;
750 }
751 mpa_frame->flags = IETF_MPA_FLAGS_CRC;
752 mpa_frame->rev = cm_node->mpa_frame_rev;
753 mpa_frame->priv_data_len = htons(cm_node->pdata.size);
754 }
755
756 /**
757 * i40iw_build_mpa_v2 - build a MPA V2 frame
758 * @cm_node: connection's node
759 * @start_addr: buffer start address
760 * @mpa_key: to do read0 or write0
761 */
762 static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
763 void *start_addr,
764 u8 mpa_key)
765 {
766 struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
767 struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
768 u16 ctrl_ird, ctrl_ord;
769
770 /* initialize the upper 5 bytes of the frame */
771 i40iw_build_mpa_v1(cm_node, start_addr, mpa_key);
772 mpa_frame->flags |= IETF_MPA_V2_FLAG;
773 mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
774
775 /* initialize RTR msg */
776 if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
777 ctrl_ird = IETF_NO_IRD_ORD;
778 ctrl_ord = IETF_NO_IRD_ORD;
779 } else {
780 ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
781 IETF_NO_IRD_ORD : cm_node->ird_size;
782 ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
783 IETF_NO_IRD_ORD : cm_node->ord_size;
784 }
785
786 ctrl_ird |= IETF_PEER_TO_PEER;
787 ctrl_ird |= IETF_FLPDU_ZERO_LEN;
788
789 switch (mpa_key) {
790 case MPA_KEY_REQUEST:
791 ctrl_ord |= IETF_RDMA0_WRITE;
792 ctrl_ord |= IETF_RDMA0_READ;
793 break;
794 case MPA_KEY_REPLY:
795 switch (cm_node->send_rdma0_op) {
796 case SEND_RDMA_WRITE_ZERO:
797 ctrl_ord |= IETF_RDMA0_WRITE;
798 break;
799 case SEND_RDMA_READ_ZERO:
800 ctrl_ord |= IETF_RDMA0_READ;
801 break;
802 }
803 break;
804 default:
805 break;
806 }
807 rtr_msg->ctrl_ird = htons(ctrl_ird);
808 rtr_msg->ctrl_ord = htons(ctrl_ord);
809 }
810
811 /**
812 * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
813 * @cm_node: connection's node
814 * @mpa: mpa data buffer
815 * @mpa_key: to do read0 or write0
816 */
817 static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node,
818 struct i40iw_kmem_info *mpa,
819 u8 mpa_key)
820 {
821 int hdr_len = 0;
822
823 switch (cm_node->mpa_frame_rev) {
824 case IETF_MPA_V1:
825 hdr_len = sizeof(struct ietf_mpa_v1);
826 i40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key);
827 break;
828 case IETF_MPA_V2:
829 hdr_len = sizeof(struct ietf_mpa_v2);
830 i40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key);
831 break;
832 default:
833 break;
834 }
835
836 return hdr_len;
837 }
838
839 /**
840 * i40iw_send_mpa_request - active node send mpa request to passive node
841 * @cm_node: connection's node
842 */
843 static int i40iw_send_mpa_request(struct i40iw_cm_node *cm_node)
844 {
845 struct i40iw_puda_buf *sqbuf;
846
847 if (!cm_node) {
848 i40iw_pr_err("cm_node == NULL\n");
849 return -1;
850 }
851
852 cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
853 cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
854 &cm_node->mpa_hdr,
855 MPA_KEY_REQUEST);
856 if (!cm_node->mpa_hdr.size) {
857 i40iw_pr_err("mpa size = %d\n", cm_node->mpa_hdr.size);
858 return -1;
859 }
860
861 sqbuf = i40iw_form_cm_frame(cm_node,
862 NULL,
863 &cm_node->mpa_hdr,
864 &cm_node->pdata,
865 SET_ACK);
866 if (!sqbuf) {
867 i40iw_pr_err("sq_buf == NULL\n");
868 return -1;
869 }
870 return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
871 }
872
873 /**
874 * i40iw_send_mpa_reject - send an mpa reject frame
875 * @cm_node: connection's node
876 * @pdata: reject data for connection
877 * @plen: length of reject data
878 */
879 static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
880 const void *pdata,
881 u8 plen)
882 {
883 struct i40iw_puda_buf *sqbuf;
884 struct i40iw_kmem_info priv_info;
885
886 cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
887 cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
888 &cm_node->mpa_hdr,
889 MPA_KEY_REPLY);
890
891 cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
892 priv_info.addr = (void *)pdata;
893 priv_info.size = plen;
894
895 sqbuf = i40iw_form_cm_frame(cm_node,
896 NULL,
897 &cm_node->mpa_hdr,
898 &priv_info,
899 SET_ACK | SET_FIN);
900 if (!sqbuf) {
901 i40iw_pr_err("no sqbuf\n");
902 return -ENOMEM;
903 }
904 cm_node->state = I40IW_CM_STATE_FIN_WAIT1;
905 return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
906 }
907
908 /**
909 * i40iw_parse_mpa - process an IETF MPA frame
910 * @cm_node: connection's node
911 * @buffer: Data pointer
912 * @type: to return accept or reject
913 * @len: Len of mpa buffer
914 */
915 static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len)
916 {
917 struct ietf_mpa_v1 *mpa_frame;
918 struct ietf_mpa_v2 *mpa_v2_frame;
919 struct ietf_rtr_msg *rtr_msg;
920 int mpa_hdr_len;
921 int priv_data_len;
922
923 *type = I40IW_MPA_REQUEST_ACCEPT;
924
925 if (len < sizeof(struct ietf_mpa_v1)) {
926 i40iw_pr_err("ietf buffer small (%x)\n", len);
927 return -1;
928 }
929
930 mpa_frame = (struct ietf_mpa_v1 *)buffer;
931 mpa_hdr_len = sizeof(struct ietf_mpa_v1);
932 priv_data_len = ntohs(mpa_frame->priv_data_len);
933
934 if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
935 i40iw_pr_err("large pri_data %d\n", priv_data_len);
936 return -1;
937 }
938 if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
939 i40iw_pr_err("unsupported mpa rev = %d\n", mpa_frame->rev);
940 return -1;
941 }
942 if (mpa_frame->rev > cm_node->mpa_frame_rev) {
943 i40iw_pr_err("rev %d\n", mpa_frame->rev);
944 return -1;
945 }
946 cm_node->mpa_frame_rev = mpa_frame->rev;
947
948 if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
949 if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
950 i40iw_pr_err("Unexpected MPA Key received\n");
951 return -1;
952 }
953 } else {
954 if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
955 i40iw_pr_err("Unexpected MPA Key received\n");
956 return -1;
957 }
958 }
959
960 if (priv_data_len + mpa_hdr_len > len) {
961 i40iw_pr_err("ietf buffer len(%x + %x != %x)\n",
962 priv_data_len, mpa_hdr_len, len);
963 return -1;
964 }
965 if (len > MAX_CM_BUFFER) {
966 i40iw_pr_err("ietf buffer large len = %d\n", len);
967 return -1;
968 }
969
970 switch (mpa_frame->rev) {
971 case IETF_MPA_V2:{
972 u16 ird_size;
973 u16 ord_size;
974 u16 ctrl_ord;
975 u16 ctrl_ird;
976
977 mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
978 mpa_hdr_len += IETF_RTR_MSG_SIZE;
979 rtr_msg = &mpa_v2_frame->rtr_msg;
980
981 /* parse rtr message */
982 ctrl_ord = ntohs(rtr_msg->ctrl_ord);
983 ctrl_ird = ntohs(rtr_msg->ctrl_ird);
984 ird_size = ctrl_ird & IETF_NO_IRD_ORD;
985 ord_size = ctrl_ord & IETF_NO_IRD_ORD;
986
987 if (!(ctrl_ird & IETF_PEER_TO_PEER))
988 return -1;
989
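		/* an ird or ord of IETF_NO_IRD_ORD means the peer specified no limit; skip negotiation */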
990 if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
991 cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
992 goto negotiate_done;
993 }
994
995 if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
996 /* responder */
997 if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
998 cm_node->ird_size = 1;
999 if (cm_node->ord_size > ird_size)
1000 cm_node->ord_size = ird_size;
1001 } else {
1002 /* initiator */
1003 if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
1004 return -1;
1005 if (cm_node->ord_size > ird_size)
1006 cm_node->ord_size = ird_size;
1007
1008 if (cm_node->ird_size < ord_size)
1009 /* no resources available */
1010 return -1;
1011 }
1012
1013 negotiate_done:
1014 if (ctrl_ord & IETF_RDMA0_READ)
1015 cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
1016 else if (ctrl_ord & IETF_RDMA0_WRITE)
1017 cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
1018 else /* Not supported RDMA0 operation */
1019 return -1;
1020 i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
1021 "MPAV2: Negotiated ORD: %d, IRD: %d\n",
1022 cm_node->ord_size, cm_node->ird_size);
1023 break;
1024 }
1025 break;
1026 case IETF_MPA_V1:
1027 default:
1028 break;
1029 }
1030
1031 memcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len);
1032 cm_node->pdata.size = priv_data_len;
1033
1034 if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
1035 *type = I40IW_MPA_REQUEST_REJECT;
1036
1037 if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
1038 cm_node->snd_mark_en = true;
1039
1040 return 0;
1041 }
1042
1043 /**
1044 * i40iw_schedule_cm_timer
1045 * @cm_node: connection's node
1046 * @sqbuf: buffer to send
1047 * @type: if it is send or close
1048 * @send_retrans: if rexmits to be done
1049 * @close_when_complete: is cm_node to be removed
1050 *
1051 * note - cm_node must be protected before calling this; hold a reference across the call:
1052 *		atomic_inc(&cm_node->ref_count);
1053 *		i40iw_schedule_cm_timer(...)
1054 *		i40iw_rem_ref_cm_node(cm_core, cm_node);
1055 */
1056 int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
1057 struct i40iw_puda_buf *sqbuf,
1058 enum i40iw_timer_type type,
1059 int send_retrans,
1060 int close_when_complete)
1061 {
1062 struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
1063 struct i40iw_cm_core *cm_core = cm_node->cm_core;
1064 struct i40iw_timer_entry *new_send;
1065 int ret = 0;
1066 u32 was_timer_set;
1067 unsigned long flags;
1068
1069 new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
1070 if (!new_send) {
1071 i40iw_free_sqbuf(vsi, (void *)sqbuf);
1072 return -ENOMEM;
1073 }
1074 new_send->retrycount = I40IW_DEFAULT_RETRYS;
1075 new_send->retranscount = I40IW_DEFAULT_RETRANS;
1076 new_send->sqbuf = sqbuf;
1077 new_send->timetosend = jiffies;
1078 new_send->type = type;
1079 new_send->send_retrans = send_retrans;
1080 new_send->close_when_complete = close_when_complete;
1081
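	/* delay close processing by HZ / 10 jiffies (100ms) */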
1082 if (type == I40IW_TIMER_TYPE_CLOSE) {
1083 new_send->timetosend += (HZ / 10);
1084 if (cm_node->close_entry) {
1085 kfree(new_send);
1086 i40iw_free_sqbuf(vsi, (void *)sqbuf);
1087 i40iw_pr_err("already close entry\n");
1088 return -EINVAL;
1089 }
1090 cm_node->close_entry = new_send;
1091 }
1092
1093 if (type == I40IW_TIMER_TYPE_SEND) {
1094 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
1095 cm_node->send_entry = new_send;
1096 atomic_inc(&cm_node->ref_count);
1097 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1098 new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;
1099
1100 atomic_inc(&sqbuf->refcount);
1101 i40iw_puda_send_buf(vsi->ilq, sqbuf);
1102 if (!send_retrans) {
1103 i40iw_cleanup_retrans_entry(cm_node);
1104 if (close_when_complete)
1105 i40iw_rem_ref_cm_node(cm_node);
1106 return ret;
1107 }
1108 }
1109
1110 spin_lock_irqsave(&cm_core->ht_lock, flags);
1111 was_timer_set = timer_pending(&cm_core->tcp_timer);
1112
1113 if (!was_timer_set) {
1114 cm_core->tcp_timer.expires = new_send->timetosend;
1115 add_timer(&cm_core->tcp_timer);
1116 }
1117 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1118
1119 return ret;
1120 }
1121
1122 /**
1123 * i40iw_retrans_expired - Could not rexmit the packet
1124 * @cm_node: connection's node
1125 */
1126 static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node)
1127 {
1128 struct iw_cm_id *cm_id = cm_node->cm_id;
1129 enum i40iw_cm_node_state state = cm_node->state;
1130
1131 cm_node->state = I40IW_CM_STATE_CLOSED;
1132 switch (state) {
1133 case I40IW_CM_STATE_SYN_RCVD:
1134 case I40IW_CM_STATE_CLOSING:
1135 i40iw_rem_ref_cm_node(cm_node);
1136 break;
1137 case I40IW_CM_STATE_FIN_WAIT1:
1138 case I40IW_CM_STATE_LAST_ACK:
1139 if (cm_node->cm_id)
1140 cm_id->rem_ref(cm_id);
1141 i40iw_send_reset(cm_node);
1142 break;
1143 default:
1144 atomic_inc(&cm_node->ref_count);
1145 i40iw_send_reset(cm_node);
1146 i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
1147 break;
1148 }
1149 }
1150
1151 /**
1152 * i40iw_handle_close_entry - for handling retry/timeouts
1153 * @cm_node: connection's node
1154 * @rem_node: flag to remove cm_node
1155 */
1156 static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node)
1157 {
1158 struct i40iw_timer_entry *close_entry = cm_node->close_entry;
1159 struct iw_cm_id *cm_id = cm_node->cm_id;
1160 struct i40iw_qp *iwqp;
1161 unsigned long flags;
1162
1163 if (!close_entry)
1164 return;
1165 iwqp = (struct i40iw_qp *)close_entry->sqbuf;
1166 if (iwqp) {
1167 spin_lock_irqsave(&iwqp->lock, flags);
1168 if (iwqp->cm_id) {
1169 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
1170 iwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR;
1171 iwqp->last_aeq = I40IW_AE_RESET_SENT;
1172 iwqp->ibqp_state = IB_QPS_ERR;
1173 spin_unlock_irqrestore(&iwqp->lock, flags);
1174 i40iw_cm_disconn(iwqp);
1175 } else {
1176 spin_unlock_irqrestore(&iwqp->lock, flags);
1177 }
1178 } else if (rem_node) {
1179 /* TIME_WAIT state */
1180 i40iw_rem_ref_cm_node(cm_node);
1181 }
1182 if (cm_id)
1183 cm_id->rem_ref(cm_id);
1184 kfree(close_entry);
1185 cm_node->close_entry = NULL;
1186 }
1187
1188 /**
1189 * i40iw_cm_timer_tick - system's timer expired callback
1190 * @pass: Pointing to cm_core
1191 */
1192 static void i40iw_cm_timer_tick(unsigned long pass)
1193 {
1194 unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
1195 struct i40iw_cm_node *cm_node;
1196 struct i40iw_timer_entry *send_entry, *close_entry;
1197 struct list_head *list_core_temp;
1198 struct i40iw_sc_vsi *vsi;
1199 struct list_head *list_node;
1200 struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
1201 u32 settimer = 0;
1202 unsigned long timetosend;
1203 struct i40iw_sc_dev *dev;
1204 unsigned long flags;
1205
1206 struct list_head timer_list;
1207
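	/* under ht_lock, take a reference on every node with pending timer work, then process the list with the lock dropped */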
1208 INIT_LIST_HEAD(&timer_list);
1209 spin_lock_irqsave(&cm_core->ht_lock, flags);
1210
1211 list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
1212 cm_node = container_of(list_node, struct i40iw_cm_node, list);
1213 if (cm_node->close_entry || cm_node->send_entry) {
1214 atomic_inc(&cm_node->ref_count);
1215 list_add(&cm_node->timer_entry, &timer_list);
1216 }
1217 }
1218 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1219
1220 list_for_each_safe(list_node, list_core_temp, &timer_list) {
1221 cm_node = container_of(list_node,
1222 struct i40iw_cm_node,
1223 timer_entry);
1224 close_entry = cm_node->close_entry;
1225
1226 if (close_entry) {
1227 if (time_after(close_entry->timetosend, jiffies)) {
1228 if (nexttimeout > close_entry->timetosend ||
1229 !settimer) {
1230 nexttimeout = close_entry->timetosend;
1231 settimer = 1;
1232 }
1233 } else {
1234 i40iw_handle_close_entry(cm_node, 1);
1235 }
1236 }
1237
1238 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
1239
1240 send_entry = cm_node->send_entry;
1241 if (!send_entry)
1242 goto done;
1243 if (time_after(send_entry->timetosend, jiffies)) {
1244 if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
1245 if ((nexttimeout > send_entry->timetosend) ||
1246 !settimer) {
1247 nexttimeout = send_entry->timetosend;
1248 settimer = 1;
1249 }
1250 } else {
1251 i40iw_free_retrans_entry(cm_node);
1252 }
1253 goto done;
1254 }
1255
1256 if ((cm_node->state == I40IW_CM_STATE_OFFLOADED) ||
1257 (cm_node->state == I40IW_CM_STATE_CLOSED)) {
1258 i40iw_free_retrans_entry(cm_node);
1259 goto done;
1260 }
1261
1262 if (!send_entry->retranscount || !send_entry->retrycount) {
1263 i40iw_free_retrans_entry(cm_node);
1264
1265 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1266 i40iw_retrans_expired(cm_node);
1267 cm_node->state = I40IW_CM_STATE_CLOSED;
1268 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
1269 goto done;
1270 }
1271 cm_node->cm_core->stats_pkt_retrans++;
1272 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1273
1274 vsi = &cm_node->iwdev->vsi;
1275 dev = cm_node->dev;
1276 atomic_inc(&send_entry->sqbuf->refcount);
1277 i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
1278 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
1279 if (send_entry->send_retrans) {
1280 send_entry->retranscount--;
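			/* exponential backoff: the retransmit timeout doubles each attempt, capped at I40IW_MAX_TIMEOUT */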
1281 timetosend = (I40IW_RETRY_TIMEOUT <<
1282 (I40IW_DEFAULT_RETRANS -
1283 send_entry->retranscount));
1284
1285 send_entry->timetosend = jiffies +
1286 min(timetosend, I40IW_MAX_TIMEOUT);
1287 if (nexttimeout > send_entry->timetosend || !settimer) {
1288 nexttimeout = send_entry->timetosend;
1289 settimer = 1;
1290 }
1291 } else {
1292 int close_when_complete;
1293
1294 close_when_complete = send_entry->close_when_complete;
1295 i40iw_debug(cm_node->dev,
1296 I40IW_DEBUG_CM,
1297 "cm_node=%p state=%d\n",
1298 cm_node,
1299 cm_node->state);
1300 i40iw_free_retrans_entry(cm_node);
1301 if (close_when_complete)
1302 i40iw_rem_ref_cm_node(cm_node);
1303 }
1304 done:
1305 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1306 i40iw_rem_ref_cm_node(cm_node);
1307 }
1308
1309 if (settimer) {
1310 spin_lock_irqsave(&cm_core->ht_lock, flags);
1311 if (!timer_pending(&cm_core->tcp_timer)) {
1312 cm_core->tcp_timer.expires = nexttimeout;
1313 add_timer(&cm_core->tcp_timer);
1314 }
1315 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1316 }
1317 }
1318
1319 /**
1320 * i40iw_send_syn - send SYN packet
1321 * @cm_node: connection's node
1322 * @sendack: flag to set ACK bit or not
1323 */
1324 int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
1325 {
1326 struct i40iw_puda_buf *sqbuf;
1327 int flags = SET_SYN;
1328 char optionsbuffer[sizeof(struct option_mss) +
1329 sizeof(struct option_windowscale) +
1330 sizeof(struct option_base) + TCP_OPTIONS_PADDING];
1331 struct i40iw_kmem_info opts;
1332
1333 int optionssize = 0;
1334 /* Sending MSS option */
1335 union all_known_options *options;
1336
1337 opts.addr = optionsbuffer;
1338 if (!cm_node) {
1339 i40iw_pr_err("no cm_node\n");
1340 return -EINVAL;
1341 }
1342
1343 options = (union all_known_options *)&optionsbuffer[optionssize];
1344 options->as_mss.optionnum = OPTION_NUMBER_MSS;
1345 options->as_mss.length = sizeof(struct option_mss);
1346 options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
1347 optionssize += sizeof(struct option_mss);
1348
1349 options = (union all_known_options *)&optionsbuffer[optionssize];
1350 options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
1351 options->as_windowscale.length = sizeof(struct option_windowscale);
1352 options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
1353 optionssize += sizeof(struct option_windowscale);
1354 options = (union all_known_options *)&optionsbuffer[optionssize];
1355 options->as_end = OPTION_NUMBER_END;
1356 optionssize += 1;
1357
1358 if (sendack)
1359 flags |= SET_ACK;
1360
1361 opts.size = optionssize;
1362
1363 sqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags);
1364 if (!sqbuf) {
1365 i40iw_pr_err("no sqbuf\n");
1366 return -1;
1367 }
1368 return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
1369 }
1370
1371 /**
1372 * i40iw_send_ack - Send ACK packet
1373 * @cm_node: connection's node
1374 */
1375 static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
1376 {
1377 struct i40iw_puda_buf *sqbuf;
1378 struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
1379
1380 sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
1381 if (sqbuf)
1382 i40iw_puda_send_buf(vsi->ilq, sqbuf);
1383 else
1384 i40iw_pr_err("no sqbuf\n");
1385 }
1386
1387 /**
1388 * i40iw_send_fin - Send FIN pkt
1389 * @cm_node: connection's node
1390 */
1391 static int i40iw_send_fin(struct i40iw_cm_node *cm_node)
1392 {
1393 struct i40iw_puda_buf *sqbuf;
1394
1395 sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN);
1396 if (!sqbuf) {
1397 i40iw_pr_err("no sqbuf\n");
1398 return -1;
1399 }
1400 return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
1401 }
1402
1403 /**
1404 * i40iw_find_node - find a cm node that matches the reference cm node
1405 * @cm_core: cm's core
1406 * @rem_port: remote tcp port num
1407 * @rem_addr: remote ip addr
1408 * @loc_port: local tcp port num
1409 * @loc_addr: loc ip addr
1410 * @add_refcnt: flag to increment refcount of cm_node
1411 */
1412 struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
1413 u16 rem_port,
1414 u32 *rem_addr,
1415 u16 loc_port,
1416 u32 *loc_addr,
1417 bool add_refcnt)
1418 {
1419 struct list_head *hte;
1420 struct i40iw_cm_node *cm_node;
1421 unsigned long flags;
1422
1423 hte = &cm_core->connected_nodes;
1424
1425 /* walk list and find cm_node matching this addr/port four-tuple */
1426 spin_lock_irqsave(&cm_core->ht_lock, flags);
1427 list_for_each_entry(cm_node, hte, list) {
1428 if (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
1429 (cm_node->loc_port == loc_port) &&
1430 !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) &&
1431 (cm_node->rem_port == rem_port)) {
1432 if (add_refcnt)
1433 atomic_inc(&cm_node->ref_count);
1434 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1435 return cm_node;
1436 }
1437 }
1438 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1439
1440 /* no owner node */
1441 return NULL;
1442 }
1443
1444 /**
1445 * i40iw_find_listener - find a cm node listening on this addr-port pair
1446 * @cm_core: cm's core
1447 * @dst_port: listener tcp port num
1448 * @dst_addr: listener ip addr
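 * @vlan_id: virtual LAN ID of the listener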
1449 * @listener_state: state to match with listen node's
1450 */
1451 static struct i40iw_cm_listener *i40iw_find_listener(
1452 struct i40iw_cm_core *cm_core,
1453 u32 *dst_addr,
1454 u16 dst_port,
1455 u16 vlan_id,
1456 enum i40iw_cm_listener_state
1457 listener_state)
1458 {
1459 struct i40iw_cm_listener *listen_node;
1460 static const u32 ip_zero[4] = { 0, 0, 0, 0 };
1461 u32 listen_addr[4];
1462 u16 listen_port;
1463 unsigned long flags;
1464
1465 /* walk list and find the listen node matching this addr/port pair */
1466 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
1467 list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
1468 memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
1469 listen_port = listen_node->loc_port;
1470 /* compare node pair, return node handle if a match */
1471 if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
1472 !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
1473 (listen_port == dst_port) &&
1474 (listener_state & listen_node->listener_state)) {
1475 atomic_inc(&listen_node->ref_count);
1476 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1477 return listen_node;
1478 }
1479 }
1480 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1481 return NULL;
1482 }
1483
1484 /**
1485 * i40iw_add_hte_node - add a cm node to the hash table
1486 * @cm_core: cm's core
1487 * @cm_node: connection's node
1488 */
1489 static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
1490 struct i40iw_cm_node *cm_node)
1491 {
1492 struct list_head *hte;
1493 unsigned long flags;
1494
1495 if (!cm_node || !cm_core) {
1496 i40iw_pr_err("cm_node or cm_core == NULL\n");
1497 return;
1498 }
1499 spin_lock_irqsave(&cm_core->ht_lock, flags);
1500
1501 /* get a handle on the hash table element (list head for this slot) */
1502 hte = &cm_core->connected_nodes;
1503 list_add_tail(&cm_node->list, hte);
1504 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1505 }
1506
1507 /**
1508 * i40iw_listen_port_in_use - determine if port is in use
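 * @cm_core: cm's core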
1509 * @port: Listen port number
1510 */
1511 static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
1512 {
1513 struct i40iw_cm_listener *listen_node;
1514 unsigned long flags;
1515 bool ret = false;
1516
1517 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
1518 list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
1519 if (listen_node->loc_port == port) {
1520 ret = true;
1521 break;
1522 }
1523 }
1524 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1525 return ret;
1526 }
1527
1528 /**
1529 * i40iw_del_multiple_qhash - Remove qhash and child listens
1530 * @iwdev: iWarp device
1531 * @cm_info: CM info for parent listen node
1532 * @cm_parent_listen_node: The parent listen node
1533 */
1534 static enum i40iw_status_code i40iw_del_multiple_qhash(
1535 struct i40iw_device *iwdev,
1536 struct i40iw_cm_info *cm_info,
1537 struct i40iw_cm_listener *cm_parent_listen_node)
1538 {
1539 struct i40iw_cm_listener *child_listen_node;
1540 enum i40iw_status_code ret = I40IW_ERR_CONFIG;
1541 struct list_head *pos, *tpos;
1542 unsigned long flags;
1543
1544 spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
1545 list_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) {
1546 child_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list);
1547 if (child_listen_node->ipv4)
1548 i40iw_debug(&iwdev->sc_dev,
1549 I40IW_DEBUG_CM,
1550 "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
1551 child_listen_node->loc_addr,
1552 child_listen_node->loc_port,
1553 child_listen_node->vlan_id);
1554 else
1555 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
1556 "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
1557 child_listen_node->loc_addr,
1558 child_listen_node->loc_port,
1559 child_listen_node->vlan_id);
1560 list_del(pos);
1561 memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
1562 sizeof(cm_info->loc_addr));
1563 cm_info->vlan_id = child_listen_node->vlan_id;
1564 if (child_listen_node->qhash_set) {
1565 ret = i40iw_manage_qhash(iwdev, cm_info,
1566 I40IW_QHASH_TYPE_TCP_SYN,
1567 I40IW_QHASH_MANAGE_TYPE_DELETE,
1568 NULL, false);
1569 child_listen_node->qhash_set = false;
1570 } else {
1571 ret = I40IW_SUCCESS;
1572 }
1573 i40iw_debug(&iwdev->sc_dev,
1574 I40IW_DEBUG_CM,
1575 "freed pointer = %p\n",
1576 child_listen_node);
1577 kfree(child_listen_node);
1578 cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
1579 }
1580 spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
1581
1582 return ret;
1583 }
1584
1585 /**
1586 * i40iw_netdev_vlan_ipv6 - Gets the netdev and mac
1587 * @addr: local IPv6 address
1588 * @vlan_id: vlan id for the given IPv6 address
1589 * @mac: mac address for the given IPv6 address
1590 *
1591 * Returns the net_device of the IPv6 address and also sets the
1592 * vlan id and mac for that address.
1593 */
1594 static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
1595 {
1596 struct net_device *ip_dev = NULL;
1597 struct in6_addr laddr6;
1598
1599 if (!IS_ENABLED(CONFIG_IPV6))
1600 return NULL;
1601 i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
1602 if (vlan_id)
1603 *vlan_id = I40IW_NO_VLAN;
1604 if (mac)
1605 eth_zero_addr(mac);
1606 rcu_read_lock();
1607 for_each_netdev_rcu(&init_net, ip_dev) {
1608 if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
1609 if (vlan_id)
1610 *vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
1611 if (ip_dev->dev_addr && mac)
1612 ether_addr_copy(mac, ip_dev->dev_addr);
1613 break;
1614 }
1615 }
1616 rcu_read_unlock();
1617 return ip_dev;
1618 }
1619
1620 /**
1621 * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
1622 * @addr: local IPv4 address
1623 */
1624 static u16 i40iw_get_vlan_ipv4(u32 *addr)
1625 {
1626 struct net_device *netdev;
1627 u16 vlan_id = I40IW_NO_VLAN;
1628
1629 netdev = ip_dev_find(&init_net, htonl(addr[0]));
1630 if (netdev) {
1631 vlan_id = rdma_vlan_dev_vlan_id(netdev);
1632 dev_put(netdev);
1633 }
1634 return vlan_id;
1635 }
1636
1637 /**
1638 * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6
1639 * @iwdev: iWarp device
1640 * @cm_info: CM info for parent listen node
1641 * @cm_parent_listen_node: The parent listen node
1642 *
1643 * Adds a qhash and a child listen node for every IPv6 address
1644 * on the adapter and adds the associated qhash filter
1645 */
1646 static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
1647 struct i40iw_cm_info *cm_info,
1648 struct i40iw_cm_listener *cm_parent_listen_node)
1649 {
1650 struct net_device *ip_dev;
1651 struct inet6_dev *idev;
1652 struct inet6_ifaddr *ifp, *tmp;
1653 enum i40iw_status_code ret = 0;
1654 struct i40iw_cm_listener *child_listen_node;
1655 unsigned long flags;
1656
1657 rtnl_lock();
1658 for_each_netdev_rcu(&init_net, ip_dev) {
1659 if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
1660 (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
1661 (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
1662 idev = __in6_dev_get(ip_dev);
1663 if (!idev) {
1664 i40iw_pr_err("idev == NULL\n");
1665 break;
1666 }
1667 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
1668 i40iw_debug(&iwdev->sc_dev,
1669 I40IW_DEBUG_CM,
1670 "IP=%pI6, vlan_id=%d, MAC=%pM\n",
1671 &ifp->addr,
1672 rdma_vlan_dev_vlan_id(ip_dev),
1673 ip_dev->dev_addr);
1674 child_listen_node =
1675 kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
1676 i40iw_debug(&iwdev->sc_dev,
1677 I40IW_DEBUG_CM,
1678 "Allocating child listener %p\n",
1679 child_listen_node);
1680 if (!child_listen_node) {
1681 ret = I40IW_ERR_NO_MEMORY;
1682 goto exit;
1683 }
1684 cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
1685 cm_parent_listen_node->vlan_id = cm_info->vlan_id;
1686
1687 memcpy(child_listen_node, cm_parent_listen_node,
1688 sizeof(*child_listen_node));
1689
1690 i40iw_copy_ip_ntohl(child_listen_node->loc_addr,
1691 ifp->addr.in6_u.u6_addr32);
1692 memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
1693 sizeof(cm_info->loc_addr));
1694
1695 ret = i40iw_manage_qhash(iwdev, cm_info,
1696 I40IW_QHASH_TYPE_TCP_SYN,
1697 I40IW_QHASH_MANAGE_TYPE_ADD,
1698 NULL, true);
1699 if (!ret) {
1700 child_listen_node->qhash_set = true;
1701 spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
1702 list_add(&child_listen_node->child_listen_list,
1703 &cm_parent_listen_node->child_listen_list);
1704 spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
1705 cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
1706 } else {
1707 kfree(child_listen_node);
1708 }
1709 }
1710 }
1711 }
1712 exit:
1713 rtnl_unlock();
1714 return ret;
1715 }
1716
1717 /**
1718 * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4
1719 * @iwdev: iWarp device
1720 * @cm_info: CM info for parent listen node
1721 * @cm_parent_listen_node: The parent listen node
1722 *
1723 * Adds a qhash and a child listen node for every IPv4 address
1724 * on the adapter and adds the associated qhash filter
1725 */
1726 static enum i40iw_status_code i40iw_add_mqh_4(
1727 struct i40iw_device *iwdev,
1728 struct i40iw_cm_info *cm_info,
1729 struct i40iw_cm_listener *cm_parent_listen_node)
1730 {
1731 struct net_device *dev;
1732 struct in_device *idev;
1733 struct i40iw_cm_listener *child_listen_node;
1734 enum i40iw_status_code ret = 0;
1735 unsigned long flags;
1736
1737 rtnl_lock();
1738 for_each_netdev(&init_net, dev) {
1739 if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&
1740 (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
1741 (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
1742 idev = in_dev_get(dev);
1743 for_ifa(idev) {
1744 i40iw_debug(&iwdev->sc_dev,
1745 I40IW_DEBUG_CM,
1746 "Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
1747 &ifa->ifa_address,
1748 rdma_vlan_dev_vlan_id(dev),
1749 dev->dev_addr);
1750 child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
1751 cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
1752 i40iw_debug(&iwdev->sc_dev,
1753 I40IW_DEBUG_CM,
1754 "Allocating child listener %p\n",
1755 child_listen_node);
1756 if (!child_listen_node) {
1757 in_dev_put(idev);
1758 ret = I40IW_ERR_NO_MEMORY;
1759 goto exit;
1760 }
1761 cm_info->vlan_id = rdma_vlan_dev_vlan_id(dev);
1762 cm_parent_listen_node->vlan_id = cm_info->vlan_id;
1763 memcpy(child_listen_node,
1764 cm_parent_listen_node,
1765 sizeof(*child_listen_node));
1766
1767 child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address);
1768 memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
1769 sizeof(cm_info->loc_addr));
1770
1771 ret = i40iw_manage_qhash(iwdev,
1772 cm_info,
1773 I40IW_QHASH_TYPE_TCP_SYN,
1774 I40IW_QHASH_MANAGE_TYPE_ADD,
1775 NULL,
1776 true);
1777 if (!ret) {
1778 child_listen_node->qhash_set = true;
1779 spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
1780 list_add(&child_listen_node->child_listen_list,
1781 &cm_parent_listen_node->child_listen_list);
1782 spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
1783 } else {
1784 kfree(child_listen_node);
1785 cm_parent_listen_node->cm_core->stats_listen_nodes_created--;
1786 }
1787 }
1788 endfor_ifa(idev);
1789 in_dev_put(idev);
1790 }
1791 }
1792 exit:
1793 rtnl_unlock();
1794 return ret;
1795 }
1796
1797 /**
1798 * i40iw_dec_refcnt_listen - delete listener and associated cm nodes
1799 * @cm_core: cm's core
1800 * @free_hanging_nodes: to free associated cm_nodes
1801 * @apbvt_del: flag to delete the apbvt
1802 */
1803 static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
1804 struct i40iw_cm_listener *listener,
1805 int free_hanging_nodes, bool apbvt_del)
1806 {
1807 int ret = -EINVAL;
1808 int err = 0;
1809 struct list_head *list_pos;
1810 struct list_head *list_temp;
1811 struct i40iw_cm_node *cm_node;
1812 struct list_head reset_list;
1813 struct i40iw_cm_info nfo;
1814 struct i40iw_cm_node *loopback;
1815 enum i40iw_cm_node_state old_state;
1816 unsigned long flags;
1817
1818 /* free non-accelerated child nodes for this listener */
1819 INIT_LIST_HEAD(&reset_list);
1820 if (free_hanging_nodes) {
1821 spin_lock_irqsave(&cm_core->ht_lock, flags);
1822 list_for_each_safe(list_pos, list_temp, &cm_core->connected_nodes) {
1823 cm_node = container_of(list_pos, struct i40iw_cm_node, list);
1824 if ((cm_node->listener == listener) && !cm_node->accelerated) {
1825 atomic_inc(&cm_node->ref_count);
1826 list_add(&cm_node->reset_entry, &reset_list);
1827 }
1828 }
1829 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1830 }
1831
1832 list_for_each_safe(list_pos, list_temp, &reset_list) {
1833 cm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry);
1834 loopback = cm_node->loopbackpartner;
1835 if (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) {
1836 i40iw_rem_ref_cm_node(cm_node);
1837 } else {
1838 if (!loopback) {
1839 i40iw_cleanup_retrans_entry(cm_node);
1840 err = i40iw_send_reset(cm_node);
1841 if (err) {
1842 cm_node->state = I40IW_CM_STATE_CLOSED;
1843 i40iw_pr_err("send reset\n");
1844 } else {
1845 old_state = cm_node->state;
1846 cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
1847 if (old_state != I40IW_CM_STATE_MPAREQ_RCVD)
1848 i40iw_rem_ref_cm_node(cm_node);
1849 }
1850 } else {
1851 struct i40iw_cm_event event;
1852
1853 event.cm_node = loopback;
1854 memcpy(event.cm_info.rem_addr,
1855 loopback->rem_addr, sizeof(event.cm_info.rem_addr));
1856 memcpy(event.cm_info.loc_addr,
1857 loopback->loc_addr, sizeof(event.cm_info.loc_addr));
1858 event.cm_info.rem_port = loopback->rem_port;
1859 event.cm_info.loc_port = loopback->loc_port;
1860 event.cm_info.cm_id = loopback->cm_id;
1861 event.cm_info.ipv4 = loopback->ipv4;
1862 atomic_inc(&loopback->ref_count);
1863 loopback->state = I40IW_CM_STATE_CLOSED;
1864 i40iw_event_connect_error(&event);
1865 cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
1866 i40iw_rem_ref_cm_node(cm_node);
1867 }
1868 }
1869 }
1870
1871 if (!atomic_dec_return(&listener->ref_count)) {
1872 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
1873 list_del(&listener->list);
1874 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1875
1876 if (listener->iwdev) {
1877 if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port))
1878 i40iw_manage_apbvt(listener->iwdev,
1879 listener->loc_port,
1880 I40IW_MANAGE_APBVT_DEL);
1881
1882 memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
1883 nfo.loc_port = listener->loc_port;
1884 nfo.ipv4 = listener->ipv4;
1885 nfo.vlan_id = listener->vlan_id;
1886 nfo.user_pri = listener->user_pri;
1887
1888 if (!list_empty(&listener->child_listen_list)) {
1889 i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
1890 } else {
1891 if (listener->qhash_set)
1892 i40iw_manage_qhash(listener->iwdev,
1893 &nfo,
1894 I40IW_QHASH_TYPE_TCP_SYN,
1895 I40IW_QHASH_MANAGE_TYPE_DELETE,
1896 NULL,
1897 false);
1898 }
1899 }
1900
1901 cm_core->stats_listen_destroyed++;
1902 kfree(listener);
1903 cm_core->stats_listen_nodes_destroyed++;
1904 listener = NULL;
1905 ret = 0;
1906 }
1907
1908 if (listener) {
1909 if (atomic_read(&listener->pend_accepts_cnt) > 0)
1910 i40iw_debug(cm_core->dev,
1911 I40IW_DEBUG_CM,
1912 "%s: listener (%p) pending accepts=%u\n",
1913 __func__,
1914 listener,
1915 atomic_read(&listener->pend_accepts_cnt));
1916 }
1917
1918 return ret;
1919 }
1920
1921 /**
1922 * i40iw_cm_del_listen - delete a listener
1923 * @cm_core: cm's core
1924 * @listener: passive connection's listener
1925 * @apbvt_del: flag to delete apbvt
1926 */
1927 static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core,
1928 struct i40iw_cm_listener *listener,
1929 bool apbvt_del)
1930 {
1931 listener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE;
1932 listener->cm_id = NULL; /* going to be destroyed pretty soon */
1933 return i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
1934 }
1935
1936 /**
1937 * i40iw_addr_resolve_neigh - resolve neighbor address
1938 * @iwdev: iwarp device structure
1939 * @src_ip: local ip address
1940 * @dst_ip: remote ip address
1941 * @arpindex: index of the existing arp entry, or negative if none
1942 */
1943 static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
1944 u32 src_ip,
1945 u32 dst_ip,
1946 int arpindex)
1947 {
1948 struct rtable *rt;
1949 struct neighbour *neigh;
1950 int rc = arpindex;
1951 struct net_device *netdev = iwdev->netdev;
1952 __be32 dst_ipaddr = htonl(dst_ip);
1953 __be32 src_ipaddr = htonl(src_ip);
1954
1955 rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
1956 if (IS_ERR(rt)) {
1957 i40iw_pr_err("ip_route_output\n");
1958 return rc;
1959 }
1960
1961 if (netif_is_bond_slave(netdev))
1962 netdev = netdev_master_upper_dev_get(netdev);
1963
1964 neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
1965
1966 rcu_read_lock();
1967 if (neigh) {
1968 if (neigh->nud_state & NUD_VALID) {
1969 if (arpindex >= 0) {
1970 if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
1971 neigh->ha))
1972 /* Mac address same as arp table */
1973 goto resolve_neigh_exit;
1974 i40iw_manage_arp_cache(iwdev,
1975 iwdev->arp_table[arpindex].mac_addr,
1976 &dst_ip,
1977 true,
1978 I40IW_ARP_DELETE);
1979 }
1980
1981 i40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD);
1982 rc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE);
1983 } else {
1984 neigh_event_send(neigh, NULL);
1985 }
1986 }
1987 resolve_neigh_exit:
1988
1989 rcu_read_unlock();
1990 if (neigh)
1991 neigh_release(neigh);
1992
1993 ip_rt_put(rt);
1994 return rc;
1995 }
1996
1997 /**
1998 * i40iw_get_dst_ipv6 - get destination cache entry for the ipv6 route
 * @src_addr: local ipv6 sockaddr
 * @dst_addr: remote ipv6 sockaddr
1999 */
2000 static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
2001 struct sockaddr_in6 *dst_addr)
2002 {
2003 struct dst_entry *dst;
2004 struct flowi6 fl6;
2005
2006 memset(&fl6, 0, sizeof(fl6));
2007 fl6.daddr = dst_addr->sin6_addr;
2008 fl6.saddr = src_addr->sin6_addr;
2009 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
2010 fl6.flowi6_oif = dst_addr->sin6_scope_id;
2011
2012 dst = ip6_route_output(&init_net, NULL, &fl6);
2013 return dst;
2014 }
2015
2016 /**
2017 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
2018 * @iwdev: iwarp device structure
2019 * @src: local ipv6 address
 * @dest: remote ipv6 address
2020 * @arpindex: index of the existing arp entry, or negative if none
2021 */
2022 static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
2023 u32 *src,
2024 u32 *dest,
2025 int arpindex)
2026 {
2027 struct neighbour *neigh;
2028 int rc = arpindex;
2029 struct net_device *netdev = iwdev->netdev;
2030 struct dst_entry *dst;
2031 struct sockaddr_in6 dst_addr;
2032 struct sockaddr_in6 src_addr;
2033
2034 memset(&dst_addr, 0, sizeof(dst_addr));
2035 dst_addr.sin6_family = AF_INET6;
2036 i40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
2037 memset(&src_addr, 0, sizeof(src_addr));
2038 src_addr.sin6_family = AF_INET6;
2039 i40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
2040 dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
2041 if (!dst || dst->error) {
2042 if (dst) {
2043 dst_release(dst);
2044 i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
2045 dst->error);
2046 }
2047 return rc;
2048 }
2049
2050 if (netif_is_bond_slave(netdev))
2051 netdev = netdev_master_upper_dev_get(netdev);
2052
2053 neigh = dst_neigh_lookup(dst, &dst_addr);
2054
2055 rcu_read_lock();
2056 if (neigh) {
2057 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "dst_neigh_lookup MAC=%pM\n", neigh->ha);
2058 if (neigh->nud_state & NUD_VALID) {
2059 if (arpindex >= 0) {
2060 if (ether_addr_equal
2061 (iwdev->arp_table[arpindex].mac_addr,
2062 neigh->ha)) {
2063 /* Mac address same as in arp table */
2064 goto resolve_neigh_exit6;
2065 }
2066 i40iw_manage_arp_cache(iwdev,
2067 iwdev->arp_table[arpindex].mac_addr,
2068 dest,
2069 false,
2070 I40IW_ARP_DELETE);
2071 }
2072 i40iw_manage_arp_cache(iwdev,
2073 neigh->ha,
2074 dest,
2075 false,
2076 I40IW_ARP_ADD);
2077 rc = i40iw_arp_table(iwdev,
2078 dest,
2079 false,
2080 NULL,
2081 I40IW_ARP_RESOLVE);
2082 } else {
2083 neigh_event_send(neigh, NULL);
2084 }
2085 }
2086
2087 resolve_neigh_exit6:
2088 rcu_read_unlock();
2089 if (neigh)
2090 neigh_release(neigh);
2091 dst_release(dst);
2092 return rc;
2093 }
2094
2095 /**
2096 * i40iw_ipv4_is_loopback - check if loopback
2097 * @loc_addr: local addr to compare
2098 * @rem_addr: remote address
2099 */
2100 static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr)
2101 {
2102 return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
2103 }
2104
2105 /**
2106 * i40iw_ipv6_is_loopback - check if loopback
2107 * @loc_addr: local addr to compare
2108 * @rem_addr: remote address
2109 */
2110 static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr)
2111 {
2112 struct in6_addr raddr6;
2113
2114 i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
2115 return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);
2116 }
2117
2118 /**
2119 * i40iw_make_cm_node - create a new instance of a cm node
2120 * @cm_core: cm's core
2121 * @iwdev: iwarp device structure
2122 * @cm_info: quad info for connection
2123 * @listener: passive connection's listener
2124 */
2125 static struct i40iw_cm_node *i40iw_make_cm_node(
2126 struct i40iw_cm_core *cm_core,
2127 struct i40iw_device *iwdev,
2128 struct i40iw_cm_info *cm_info,
2129 struct i40iw_cm_listener *listener)
2130 {
2131 struct i40iw_cm_node *cm_node;
2132 struct timespec ts;
2133 int oldarpindex;
2134 int arpindex;
2135 struct net_device *netdev = iwdev->netdev;
2136
2137 /* create an hte and cm_node for this instance */
2138 cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
2139 if (!cm_node)
2140 return NULL;
2141
2142 /* set our node specific transport info */
2143 cm_node->ipv4 = cm_info->ipv4;
2144 cm_node->vlan_id = cm_info->vlan_id;
2145 if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)
2146 cm_node->vlan_id = 0;
2147 cm_node->tos = cm_info->tos;
2148 cm_node->user_pri = cm_info->user_pri;
2149 if (listener) {
2150 if (listener->tos != cm_info->tos)
2151 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB,
2152 "application TOS[%d] and remote client TOS[%d] mismatch\n",
2153 listener->tos, cm_info->tos);
2154 cm_node->tos = max(listener->tos, cm_info->tos);
2155 cm_node->user_pri = rt_tos2priority(cm_node->tos);
2156 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "listener: TOS:[%d] UP:[%d]\n",
2157 cm_node->tos, cm_node->user_pri);
2158 }
2159 memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
2160 memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
2161 cm_node->loc_port = cm_info->loc_port;
2162 cm_node->rem_port = cm_info->rem_port;
2163
2164 cm_node->mpa_frame_rev = iwdev->mpa_version;
2165 cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
2166 cm_node->ird_size = I40IW_MAX_IRD_SIZE;
2167 cm_node->ord_size = I40IW_MAX_ORD_SIZE;
2168
2169 cm_node->listener = listener;
2170 cm_node->cm_id = cm_info->cm_id;
2171 ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
2172 spin_lock_init(&cm_node->retrans_list_lock);
2173
2174 atomic_set(&cm_node->ref_count, 1);
2175 /* associate our parent CM core */
2176 cm_node->cm_core = cm_core;
2177 cm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID;
2178 cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
2179 cm_node->tcp_cntxt.rcv_wnd =
2180 I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
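	/* seed the initial local send sequence number from the
	 * nanosecond clock
	 */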
2181 ts = current_kernel_time();
2182 cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
2183 cm_node->tcp_cntxt.mss = iwdev->vsi.mss;
2184
2185 cm_node->iwdev = iwdev;
2186 cm_node->dev = &iwdev->sc_dev;
2187
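	/* loopback peers resolve straight from the arp table; all other
	 * destinations go through neighbour discovery first
	 */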
2188 if ((cm_node->ipv4 &&
2189 i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
2190 (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr,
2191 cm_node->rem_addr))) {
2192 arpindex = i40iw_arp_table(iwdev,
2193 cm_node->rem_addr,
2194 false,
2195 NULL,
2196 I40IW_ARP_RESOLVE);
2197 } else {
2198 oldarpindex = i40iw_arp_table(iwdev,
2199 cm_node->rem_addr,
2200 false,
2201 NULL,
2202 I40IW_ARP_RESOLVE);
2203 if (cm_node->ipv4)
2204 arpindex = i40iw_addr_resolve_neigh(iwdev,
2205 cm_info->loc_addr[0],
2206 cm_info->rem_addr[0],
2207 oldarpindex);
2208 else if (IS_ENABLED(CONFIG_IPV6))
2209 arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
2210 cm_info->loc_addr,
2211 cm_info->rem_addr,
2212 oldarpindex);
2213 else
2214 arpindex = -EINVAL;
2215 }
2216 if (arpindex < 0) {
2217 i40iw_pr_err("cm_node arpindex\n");
2218 kfree(cm_node);
2219 return NULL;
2220 }
2221 ether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr);
2222 i40iw_add_hte_node(cm_core, cm_node);
2223 cm_core->stats_nodes_created++;
2224 return cm_node;
2225 }
2226
2227 /**
2228 * i40iw_rem_ref_cm_node - destroy an instance of a cm node
2229 * @cm_node: connection's node
2230 */
2231 static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
2232 {
2233 struct i40iw_cm_core *cm_core = cm_node->cm_core;
2234 struct i40iw_qp *iwqp;
2235 struct i40iw_cm_info nfo;
2236 unsigned long flags;
2237
2238 spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
2239 if (atomic_dec_return(&cm_node->ref_count)) {
2240 spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
2241 return;
2242 }
2243 list_del(&cm_node->list);
2244 spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
2245
2246 /* if the node is destroyed before connection was accelerated */
2247 if (!cm_node->accelerated && cm_node->accept_pend) {
2248 pr_err("node destroyed before established\n");
2249 atomic_dec(&cm_node->listener->pend_accepts_cnt);
2250 }
2251 if (cm_node->close_entry)
2252 i40iw_handle_close_entry(cm_node, 0);
2253 if (cm_node->listener) {
2254 i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
2255 } else {
2256 if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
2257 cm_node->apbvt_set) {
2258 i40iw_manage_apbvt(cm_node->iwdev,
2259 cm_node->loc_port,
2260 I40IW_MANAGE_APBVT_DEL);
2261 i40iw_get_addr_info(cm_node, &nfo);
2262 if (cm_node->qhash_set) {
2263 i40iw_manage_qhash(cm_node->iwdev,
2264 &nfo,
2265 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
2266 I40IW_QHASH_MANAGE_TYPE_DELETE,
2267 NULL,
2268 false);
2269 cm_node->qhash_set = 0;
2270 }
2271 }
2272 }
2273
2274 iwqp = cm_node->iwqp;
2275 if (iwqp) {
2276 iwqp->cm_node = NULL;
2277 i40iw_rem_ref(&iwqp->ibqp);
2278 cm_node->iwqp = NULL;
2279 } else if (cm_node->qhash_set) {
2280 i40iw_get_addr_info(cm_node, &nfo);
2281 i40iw_manage_qhash(cm_node->iwdev,
2282 &nfo,
2283 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
2284 I40IW_QHASH_MANAGE_TYPE_DELETE,
2285 NULL,
2286 false);
2287 cm_node->qhash_set = 0;
2288 }
2289
2290 cm_node->cm_core->stats_nodes_destroyed++;
2291 kfree(cm_node);
2292 }
2293
2294 /**
2295 * i40iw_handle_fin_pkt - FIN packet received
2296 * @cm_node: connection's node
2297 */
2298 static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node)
2299 {
2300 u32 ret;
2301
2302 switch (cm_node->state) {
2303 case I40IW_CM_STATE_SYN_RCVD:
2304 case I40IW_CM_STATE_SYN_SENT:
2305 case I40IW_CM_STATE_ESTABLISHED:
2306 case I40IW_CM_STATE_MPAREJ_RCVD:
2307 cm_node->tcp_cntxt.rcv_nxt++;
2308 i40iw_cleanup_retrans_entry(cm_node);
2309 cm_node->state = I40IW_CM_STATE_LAST_ACK;
2310 i40iw_send_fin(cm_node);
2311 break;
2312 case I40IW_CM_STATE_MPAREQ_SENT:
2313 i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
2314 cm_node->tcp_cntxt.rcv_nxt++;
2315 i40iw_cleanup_retrans_entry(cm_node);
2316 cm_node->state = I40IW_CM_STATE_CLOSED;
2317 atomic_inc(&cm_node->ref_count);
2318 i40iw_send_reset(cm_node);
2319 break;
2320 case I40IW_CM_STATE_FIN_WAIT1:
2321 cm_node->tcp_cntxt.rcv_nxt++;
2322 i40iw_cleanup_retrans_entry(cm_node);
2323 cm_node->state = I40IW_CM_STATE_CLOSING;
2324 i40iw_send_ack(cm_node);
2325 /*
2326 * Wait for ACK as this is simultaneous close.
2327 * After we receive ACK, do not send anything.
2328 * Just rm the node.
2329 */
2330 break;
2331 case I40IW_CM_STATE_FIN_WAIT2:
2332 cm_node->tcp_cntxt.rcv_nxt++;
2333 i40iw_cleanup_retrans_entry(cm_node);
2334 cm_node->state = I40IW_CM_STATE_TIME_WAIT;
2335 i40iw_send_ack(cm_node);
2336 ret = i40iw_schedule_cm_timer(cm_node, NULL,
2337 I40IW_TIMER_TYPE_CLOSE, 1, 0);
2338 if (ret)
2339 i40iw_pr_err("node %p state = %d\n", cm_node, cm_node->state);
2340 break;
2341 case I40IW_CM_STATE_TIME_WAIT:
2342 cm_node->tcp_cntxt.rcv_nxt++;
2343 i40iw_cleanup_retrans_entry(cm_node);
2344 cm_node->state = I40IW_CM_STATE_CLOSED;
2345 i40iw_rem_ref_cm_node(cm_node);
2346 break;
2347 case I40IW_CM_STATE_OFFLOADED:
2348 default:
2349 i40iw_pr_err("bad state node %p state = %d\n", cm_node, cm_node->state);
2350 break;
2351 }
2352 }
2353
2354 /**
2355 * i40iw_handle_rst_pkt - process received RST packet
2356 * @cm_node: connection's node
2357 * @rbuf: receive buffer
2358 */
2359 static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
2360 struct i40iw_puda_buf *rbuf)
2361 {
2362 i40iw_cleanup_retrans_entry(cm_node);
2363 switch (cm_node->state) {
2364 case I40IW_CM_STATE_SYN_SENT:
2365 case I40IW_CM_STATE_MPAREQ_SENT:
2366 switch (cm_node->mpa_frame_rev) {
2367 case IETF_MPA_V2:
2368 cm_node->mpa_frame_rev = IETF_MPA_V1;
2369 /* send a syn and goto syn sent state */
2370 cm_node->state = I40IW_CM_STATE_SYN_SENT;
2371 if (i40iw_send_syn(cm_node, 0))
2372 i40iw_active_open_err(cm_node, false);
2373 break;
2374 case IETF_MPA_V1:
2375 default:
2376 i40iw_active_open_err(cm_node, false);
2377 break;
2378 }
2379 break;
2380 case I40IW_CM_STATE_MPAREQ_RCVD:
2381 atomic_add_return(1, &cm_node->passive_state);
2382 break;
2383 case I40IW_CM_STATE_ESTABLISHED:
2384 case I40IW_CM_STATE_SYN_RCVD:
2385 case I40IW_CM_STATE_LISTENING:
2386 i40iw_pr_err("Bad state state = %d\n", cm_node->state);
2387 i40iw_passive_open_err(cm_node, false);
2388 break;
2389 case I40IW_CM_STATE_OFFLOADED:
2390 i40iw_active_open_err(cm_node, false);
2391 break;
2392 case I40IW_CM_STATE_CLOSED:
2393 break;
2394 case I40IW_CM_STATE_FIN_WAIT2:
2395 case I40IW_CM_STATE_FIN_WAIT1:
2396 case I40IW_CM_STATE_LAST_ACK:
2397 cm_node->cm_id->rem_ref(cm_node->cm_id);
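		/* fall through */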
2398 case I40IW_CM_STATE_TIME_WAIT:
2399 cm_node->state = I40IW_CM_STATE_CLOSED;
2400 i40iw_rem_ref_cm_node(cm_node);
2401 break;
2402 default:
2403 break;
2404 }
2405 }
2406
2407 /**
2408 * i40iw_handle_rcv_mpa - Process a recv'd mpa buffer
2409 * @cm_node: connection's node
2410 * @rbuf: receive buffer
2411 */
2412 static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
2413 struct i40iw_puda_buf *rbuf)
2414 {
2415 int ret;
2416 int datasize = rbuf->datalen;
2417 u8 *dataloc = rbuf->data;
2418
2419 enum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN;
2420 u32 res_type;
2421
2422 ret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize);
2423 if (ret) {
2424 if (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT)
2425 i40iw_active_open_err(cm_node, true);
2426 else
2427 i40iw_passive_open_err(cm_node, true);
2428 return;
2429 }
2430
2431 switch (cm_node->state) {
2432 case I40IW_CM_STATE_ESTABLISHED:
2433 if (res_type == I40IW_MPA_REQUEST_REJECT)
2434 i40iw_pr_err("state for reject\n");
2435 cm_node->state = I40IW_CM_STATE_MPAREQ_RCVD;
2436 type = I40IW_CM_EVENT_MPA_REQ;
2437 i40iw_send_ack(cm_node); /* ACK received MPA request */
2438 atomic_set(&cm_node->passive_state,
2439 I40IW_PASSIVE_STATE_INDICATED);
2440 break;
2441 case I40IW_CM_STATE_MPAREQ_SENT:
2442 i40iw_cleanup_retrans_entry(cm_node);
2443 if (res_type == I40IW_MPA_REQUEST_REJECT) {
2444 type = I40IW_CM_EVENT_MPA_REJECT;
2445 cm_node->state = I40IW_CM_STATE_MPAREJ_RCVD;
2446 } else {
2447 type = I40IW_CM_EVENT_CONNECTED;
2448 cm_node->state = I40IW_CM_STATE_OFFLOADED;
2449 i40iw_send_ack(cm_node);
2450 }
2451 break;
2452 default:
2453 pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
2454 break;
2455 }
2456 i40iw_create_event(cm_node, type);
2457 }
2458
2459 /**
2460 * i40iw_indicate_pkt_err - Send up err event to cm
2461 * @cm_node: connection's node
2462 */
2463 static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node)
2464 {
2465 switch (cm_node->state) {
2466 case I40IW_CM_STATE_SYN_SENT:
2467 case I40IW_CM_STATE_MPAREQ_SENT:
2468 i40iw_active_open_err(cm_node, true);
2469 break;
2470 case I40IW_CM_STATE_ESTABLISHED:
2471 case I40IW_CM_STATE_SYN_RCVD:
2472 i40iw_passive_open_err(cm_node, true);
2473 break;
2474 case I40IW_CM_STATE_OFFLOADED:
2475 default:
2476 break;
2477 }
2478 }
2479
2480 /**
2481 * i40iw_check_syn - Check for error on received syn ack
2482 * @cm_node: connection's node
2483 * @tcph: pointer to tcp header
2484 */
2485 static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
2486 {
2487 int err = 0;
2488
2489 if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
2490 err = 1;
2491 i40iw_active_open_err(cm_node, true);
2492 }
2493 return err;
2494 }
2495
2496 /**
2497 * i40iw_check_seq - check that seq and ack numbers fall in the expected window
2498 * @cm_node: connection's node
2499 * @tcph: pointer to tcp header
2500 */
2501 static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
2502 {
2503 int err = 0;
2504 u32 seq;
2505 u32 ack_seq;
2506 u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
2507 u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
2508 u32 rcv_wnd;
2509
2510 seq = ntohl(tcph->seq);
2511 ack_seq = ntohl(tcph->ack_seq);
2512 rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
2513 if (ack_seq != loc_seq_num)
2514 err = -1;
2515 else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
2516 err = -1;
2517 if (err) {
2518 i40iw_pr_err("seq number\n");
2519 i40iw_indicate_pkt_err(cm_node);
2520 }
2521 return err;
2522 }
2523
2524 /**
2525 * i40iw_handle_syn_pkt - process a received SYN packet (passive side)
2526 * @cm_node: connection's node
2527 * @rbuf: receive buffer
2528 */
2529 static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node,
2530 struct i40iw_puda_buf *rbuf)
2531 {
2532 struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
2533 int ret;
2534 u32 inc_sequence;
2535 int optionsize;
2536 struct i40iw_cm_info nfo;
2537
2538 optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
2539 inc_sequence = ntohl(tcph->seq);
2540
2541 switch (cm_node->state) {
2542 case I40IW_CM_STATE_SYN_SENT:
2543 case I40IW_CM_STATE_MPAREQ_SENT:
2544 /* Rcvd syn on active open connection */
2545 i40iw_active_open_err(cm_node, true);
2546 break;
2547 case I40IW_CM_STATE_LISTENING:
2548 /* Passive OPEN */
2549 if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
2550 cm_node->listener->backlog) {
2551 cm_node->cm_core->stats_backlog_drops++;
2552 i40iw_passive_open_err(cm_node, false);
2553 break;
2554 }
2555 ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
2556 if (ret) {
2557 i40iw_passive_open_err(cm_node, false);
2558 /* drop pkt */
2559 break;
2560 }
2561 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
2562 cm_node->accept_pend = 1;
2563 atomic_inc(&cm_node->listener->pend_accepts_cnt);
2564
2565 cm_node->state = I40IW_CM_STATE_SYN_RCVD;
2566 i40iw_get_addr_info(cm_node, &nfo);
2567 ret = i40iw_manage_qhash(cm_node->iwdev,
2568 &nfo,
2569 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
2570 I40IW_QHASH_MANAGE_TYPE_ADD,
2571 (void *)cm_node,
2572 false);
2573 cm_node->qhash_set = true;
2574 break;
2575 case I40IW_CM_STATE_CLOSED:
2576 i40iw_cleanup_retrans_entry(cm_node);
2577 atomic_inc(&cm_node->ref_count);
2578 i40iw_send_reset(cm_node);
2579 break;
2580 case I40IW_CM_STATE_OFFLOADED:
2581 case I40IW_CM_STATE_ESTABLISHED:
2582 case I40IW_CM_STATE_FIN_WAIT1:
2583 case I40IW_CM_STATE_FIN_WAIT2:
2584 case I40IW_CM_STATE_MPAREQ_RCVD:
2585 case I40IW_CM_STATE_LAST_ACK:
2586 case I40IW_CM_STATE_CLOSING:
2587 case I40IW_CM_STATE_UNKNOWN:
2588 default:
2589 break;
2590 }
2591 }
2592
2593 /**
2594 * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side)
2595 * @cm_node: connection's node
2596 * @rbuf: receive buffer
2597 */
2598 static void i40iw_handle_synack_pkt(struct i40iw_cm_node *cm_node,
2599 struct i40iw_puda_buf *rbuf)
2600 {
2601 struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
2602 int ret;
2603 u32 inc_sequence;
2604 int optionsize;
2605
2606 optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
2607 inc_sequence = ntohl(tcph->seq);
2608 switch (cm_node->state) {
2609 case I40IW_CM_STATE_SYN_SENT:
2610 i40iw_cleanup_retrans_entry(cm_node);
2611 /* active open */
2612 if (i40iw_check_syn(cm_node, tcph)) {
2613 i40iw_pr_err("check syn fail\n");
2614 return;
2615 }
2616 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
2617 /* setup options */
2618 ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0);
2619 if (ret) {
2620 i40iw_debug(cm_node->dev,
2621 I40IW_DEBUG_CM,
2622 "cm_node=%p tcp_options failed\n",
2623 cm_node);
2624 break;
2625 }
2626 i40iw_cleanup_retrans_entry(cm_node);
2627 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
2628 i40iw_send_ack(cm_node); /* ACK for the syn_ack */
2629 ret = i40iw_send_mpa_request(cm_node);
2630 if (ret) {
2631 i40iw_debug(cm_node->dev,
2632 I40IW_DEBUG_CM,
2633 "cm_node=%p i40iw_send_mpa_request failed\n",
2634 cm_node);
2635 break;
2636 }
2637 cm_node->state = I40IW_CM_STATE_MPAREQ_SENT;
2638 break;
2639 case I40IW_CM_STATE_MPAREQ_RCVD:
2640 i40iw_passive_open_err(cm_node, true);
2641 break;
2642 case I40IW_CM_STATE_LISTENING:
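		/* a SYN+ACK is not valid for a listener; reset the peer */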
2643 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
2644 i40iw_cleanup_retrans_entry(cm_node);
2645 cm_node->state = I40IW_CM_STATE_CLOSED;
2646 i40iw_send_reset(cm_node);
2647 break;
2648 case I40IW_CM_STATE_CLOSED:
2649 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
2650 i40iw_cleanup_retrans_entry(cm_node);
2651 atomic_inc(&cm_node->ref_count);
2652 i40iw_send_reset(cm_node);
2653 break;
2654 case I40IW_CM_STATE_ESTABLISHED:
2655 case I40IW_CM_STATE_FIN_WAIT1:
2656 case I40IW_CM_STATE_FIN_WAIT2:
2657 case I40IW_CM_STATE_LAST_ACK:
2658 case I40IW_CM_STATE_OFFLOADED:
2659 case I40IW_CM_STATE_CLOSING:
2660 case I40IW_CM_STATE_UNKNOWN:
2661 case I40IW_CM_STATE_MPAREQ_SENT:
2662 default:
2663 break;
2664 }
2665 }
2666
2667 /**
2668 * i40iw_handle_ack_pkt - process packet with ACK
2669 * @cm_node: connection's node
2670 * @rbuf: receive buffer
2671 */
2672 static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
2673 struct i40iw_puda_buf *rbuf)
2674 {
2675 struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
2676 u32 inc_sequence;
2677 int ret = 0;
2678 int optionsize;
2679 u32 datasize = rbuf->datalen;
2680
2681 optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
2682
2683 if (i40iw_check_seq(cm_node, tcph))
2684 return -EINVAL;
2685
2686 inc_sequence = ntohl(tcph->seq);
2687 switch (cm_node->state) {
2688 case I40IW_CM_STATE_SYN_RCVD:
2689 i40iw_cleanup_retrans_entry(cm_node);
2690 ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
2691 if (ret)
2692 break;
2693 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
2694 cm_node->state = I40IW_CM_STATE_ESTABLISHED;
2695 if (datasize) {
2696 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
2697 i40iw_handle_rcv_mpa(cm_node, rbuf);
2698 }
2699 break;
2700 case I40IW_CM_STATE_ESTABLISHED:
2701 i40iw_cleanup_retrans_entry(cm_node);
2702 if (datasize) {
2703 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
2704 i40iw_handle_rcv_mpa(cm_node, rbuf);
2705 }
2706 break;
2707 case I40IW_CM_STATE_MPAREQ_SENT:
2708 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
2709 if (datasize) {
2710 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
2711 i40iw_handle_rcv_mpa(cm_node, rbuf);
2712 }
2713 break;
2714 case I40IW_CM_STATE_LISTENING:
2715 i40iw_cleanup_retrans_entry(cm_node);
2716 cm_node->state = I40IW_CM_STATE_CLOSED;
2717 i40iw_send_reset(cm_node);
2718 break;
2719 case I40IW_CM_STATE_CLOSED:
2720 i40iw_cleanup_retrans_entry(cm_node);
2721 atomic_inc(&cm_node->ref_count);
2722 i40iw_send_reset(cm_node);
2723 break;
2724 case I40IW_CM_STATE_LAST_ACK:
2725 case I40IW_CM_STATE_CLOSING:
2726 i40iw_cleanup_retrans_entry(cm_node);
2727 cm_node->state = I40IW_CM_STATE_CLOSED;
2728 if (!cm_node->accept_pend)
2729 cm_node->cm_id->rem_ref(cm_node->cm_id);
2730 i40iw_rem_ref_cm_node(cm_node);
2731 break;
2732 case I40IW_CM_STATE_FIN_WAIT1:
2733 i40iw_cleanup_retrans_entry(cm_node);
2734 cm_node->state = I40IW_CM_STATE_FIN_WAIT2;
2735 break;
2736 case I40IW_CM_STATE_SYN_SENT:
2737 case I40IW_CM_STATE_FIN_WAIT2:
2738 case I40IW_CM_STATE_OFFLOADED:
2739 case I40IW_CM_STATE_MPAREQ_RCVD:
2740 case I40IW_CM_STATE_UNKNOWN:
2741 default:
2742 i40iw_cleanup_retrans_entry(cm_node);
2743 break;
2744 }
2745 return ret;
2746 }
2747
2748 /**
2749 * i40iw_process_packet - process cm packet
2750 * @cm_node: connection's node
2751 * @rbuf: receive buffer
2752 */
2753 static void i40iw_process_packet(struct i40iw_cm_node *cm_node,
2754 struct i40iw_puda_buf *rbuf)
2755 {
2756 enum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN;
2757 struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
2758 u32 fin_set = 0;
2759 int ret;
2760
2761 if (tcph->rst) {
2762 pkt_type = I40IW_PKT_TYPE_RST;
2763 } else if (tcph->syn) {
2764 pkt_type = I40IW_PKT_TYPE_SYN;
2765 if (tcph->ack)
2766 pkt_type = I40IW_PKT_TYPE_SYNACK;
2767 } else if (tcph->ack) {
2768 pkt_type = I40IW_PKT_TYPE_ACK;
2769 }
2770 if (tcph->fin)
2771 fin_set = 1;
2772
2773 switch (pkt_type) {
2774 case I40IW_PKT_TYPE_SYN:
2775 i40iw_handle_syn_pkt(cm_node, rbuf);
2776 break;
2777 case I40IW_PKT_TYPE_SYNACK:
2778 i40iw_handle_synack_pkt(cm_node, rbuf);
2779 break;
2780 case I40IW_PKT_TYPE_ACK:
2781 ret = i40iw_handle_ack_pkt(cm_node, rbuf);
2782 if (fin_set && !ret)
2783 i40iw_handle_fin_pkt(cm_node);
2784 break;
2785 case I40IW_PKT_TYPE_RST:
2786 i40iw_handle_rst_pkt(cm_node, rbuf);
2787 break;
2788 default:
2789 if (fin_set &&
2790 (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
2791 i40iw_handle_fin_pkt(cm_node);
2792 break;
2793 }
2794 }
2795
2796 /**
2797 * i40iw_make_listen_node - create a listen node with params
2798 * @cm_core: cm's core
2799 * @iwdev: iwarp device structure
2800 * @cm_info: quad info for connection
2801 */
2802 static struct i40iw_cm_listener *i40iw_make_listen_node(
2803 struct i40iw_cm_core *cm_core,
2804 struct i40iw_device *iwdev,
2805 struct i40iw_cm_info *cm_info)
2806 {
2807 struct i40iw_cm_listener *listener;
2808 unsigned long flags;
2809
2810 /* cannot have multiple matching listeners */
2811 listener = i40iw_find_listener(cm_core, cm_info->loc_addr,
2812 cm_info->loc_port,
2813 cm_info->vlan_id,
2814 I40IW_CM_LISTENER_EITHER_STATE);
2815 if (listener &&
2816 (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) {
2817 atomic_dec(&listener->ref_count);
2818 i40iw_debug(cm_core->dev,
2819 I40IW_DEBUG_CM,
2820 "Not creating listener since it already exists\n");
2821 return NULL;
2822 }
2823
2824 if (!listener) {
2825 /* create a CM listen node (1/2 node to compare incoming traffic to) */
2826 listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
2827 if (!listener)
2828 return NULL;
2829 cm_core->stats_listen_nodes_created++;
2830 memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr));
2831 listener->loc_port = cm_info->loc_port;
2832
2833 INIT_LIST_HEAD(&listener->child_listen_list);
2834
2835 atomic_set(&listener->ref_count, 1);
2836 } else {
2837 listener->reused_node = 1;
2838 }
2839
2840 listener->cm_id = cm_info->cm_id;
2841 listener->ipv4 = cm_info->ipv4;
2842 listener->vlan_id = cm_info->vlan_id;
2843 atomic_set(&listener->pend_accepts_cnt, 0);
2844 listener->cm_core = cm_core;
2845 listener->iwdev = iwdev;
2846
2847 listener->backlog = cm_info->backlog;
2848 listener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE;
2849
2850 if (!listener->reused_node) {
2851 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
2852 list_add(&listener->list, &cm_core->listen_nodes);
2853 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
2854 }
2855
2856 return listener;
2857 }
2858
2859 /**
2860 * i40iw_create_cm_node - make a connection node with params
2861 * @cm_core: cm's core
2862 * @iwdev: iwarp device structure
2863 * @private_data_len: length of private data for mpa request
2864 * @private_data: pointer to private data for connection
2865 * @cm_info: quad info for connection
2866 */
2867 static struct i40iw_cm_node *i40iw_create_cm_node(
2868 struct i40iw_cm_core *cm_core,
2869 struct i40iw_device *iwdev,
2870 u16 private_data_len,
2871 void *private_data,
2872 struct i40iw_cm_info *cm_info)
2873 {
2874 struct i40iw_cm_node *cm_node;
2875 struct i40iw_cm_listener *loopback_remotelistener;
2876 struct i40iw_cm_node *loopback_remotenode;
2877 struct i40iw_cm_info loopback_cm_info;
2878
2879 /* create a CM connection node */
2880 cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
2881 if (!cm_node)
2882 return ERR_PTR(-ENOMEM);
2883 /* set our node side to client (active) side */
2884 cm_node->tcp_cntxt.client = 1;
2885 cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
2886
2887 if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
2888 loopback_remotelistener = i40iw_find_listener(
2889 cm_core,
2890 cm_info->rem_addr,
2891 cm_node->rem_port,
2892 cm_node->vlan_id,
2893 I40IW_CM_LISTENER_ACTIVE_STATE);
2894 if (!loopback_remotelistener) {
2895 i40iw_rem_ref_cm_node(cm_node);
2896 return ERR_PTR(-ECONNREFUSED);
2897 } else {
2898 loopback_cm_info = *cm_info;
2899 loopback_cm_info.loc_port = cm_info->rem_port;
2900 loopback_cm_info.rem_port = cm_info->loc_port;
2901 loopback_cm_info.cm_id = loopback_remotelistener->cm_id;
2902 loopback_cm_info.ipv4 = cm_info->ipv4;
2903 loopback_remotenode = i40iw_make_cm_node(cm_core,
2904 iwdev,
2905 &loopback_cm_info,
2906 loopback_remotelistener);
2907 if (!loopback_remotenode) {
2908 i40iw_rem_ref_cm_node(cm_node);
2909 return ERR_PTR(-ENOMEM);
2910 }
2911 cm_core->stats_loopbacks++;
2912 loopback_remotenode->loopbackpartner = cm_node;
2913 loopback_remotenode->tcp_cntxt.rcv_wscale =
2914 I40IW_CM_DEFAULT_RCV_WND_SCALE;
2915 cm_node->loopbackpartner = loopback_remotenode;
2916 memcpy(loopback_remotenode->pdata_buf, private_data,
2917 private_data_len);
2918 loopback_remotenode->pdata.size = private_data_len;
2919
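			/* cross-wire the two TCP contexts: each node's
			 * rcv_nxt tracks its partner's send sequence, and
			 * its send window/scale mirror the partner's
			 * receive window/scale
			 */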
2920 cm_node->state = I40IW_CM_STATE_OFFLOADED;
2921 cm_node->tcp_cntxt.rcv_nxt =
2922 loopback_remotenode->tcp_cntxt.loc_seq_num;
2923 loopback_remotenode->tcp_cntxt.rcv_nxt =
2924 cm_node->tcp_cntxt.loc_seq_num;
2925 cm_node->tcp_cntxt.max_snd_wnd =
2926 loopback_remotenode->tcp_cntxt.rcv_wnd;
2927 loopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
2928 cm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd;
2929 loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
2930 cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
2931 loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
2932 loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
2933 i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
2934 }
2935 return cm_node;
2936 }
2937
2938 cm_node->pdata.size = private_data_len;
2939 cm_node->pdata.addr = cm_node->pdata_buf;
2940
2941 memcpy(cm_node->pdata_buf, private_data, private_data_len);
2942
2943 cm_node->state = I40IW_CM_STATE_SYN_SENT;
2944 return cm_node;
2945 }
2946
2947 /**
2948 * i40iw_cm_reject - reject and teardown a connection
2949 * @cm_node: connection's node
2950 * @pdata: ptr to private data for reject
2951 * @plen: size of private data
2952 */
2953 static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)
2954 {
2955 int ret = 0;
2956 int err;
2957 int passive_state;
2958 struct iw_cm_id *cm_id = cm_node->cm_id;
2959 struct i40iw_cm_node *loopback = cm_node->loopbackpartner;
2960
2961 if (cm_node->tcp_cntxt.client)
2962 return ret;
2963 i40iw_cleanup_retrans_entry(cm_node);
2964
2965 if (!loopback) {
2966 passive_state = atomic_add_return(1, &cm_node->passive_state);
2967 if (passive_state == I40IW_SEND_RESET_EVENT) {
2968 cm_node->state = I40IW_CM_STATE_CLOSED;
2969 i40iw_rem_ref_cm_node(cm_node);
2970 } else {
2971 if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
2972 i40iw_rem_ref_cm_node(cm_node);
2973 } else {
2974 ret = i40iw_send_mpa_reject(cm_node, pdata, plen);
2975 if (ret) {
2976 cm_node->state = I40IW_CM_STATE_CLOSED;
2977 err = i40iw_send_reset(cm_node);
2978 if (err)
2979 i40iw_pr_err("send reset failed\n");
2980 } else {
2981 cm_id->add_ref(cm_id);
2982 }
2983 }
2984 }
2985 } else {
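		/* loopback: fail the partner's active connect with
		 * -ECONNREFUSED and drop both nodes
		 */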
2986 cm_node->cm_id = NULL;
2987 if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
2988 i40iw_rem_ref_cm_node(cm_node);
2989 i40iw_rem_ref_cm_node(loopback);
2990 } else {
2991 ret = i40iw_send_cm_event(loopback,
2992 loopback->cm_id,
2993 IW_CM_EVENT_CONNECT_REPLY,
2994 -ECONNREFUSED);
2995 i40iw_rem_ref_cm_node(cm_node);
2996 loopback->state = I40IW_CM_STATE_CLOSING;
2997
2998 cm_id = loopback->cm_id;
2999 i40iw_rem_ref_cm_node(loopback);
3000 cm_id->rem_ref(cm_id);
3001 }
3002 }
3003
3004 return ret;
3005 }
3006
3007 /**
3008 * i40iw_cm_close - close of cm connection
3009 * @cm_node: connection's node
3010 */
3011 static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
3012 {
3013 int ret = 0;
3014
3015 if (!cm_node)
3016 return -EINVAL;
3017
3018 switch (cm_node->state) {
3019 case I40IW_CM_STATE_SYN_RCVD:
3020 case I40IW_CM_STATE_SYN_SENT:
3021 case I40IW_CM_STATE_ONE_SIDE_ESTABLISHED:
3022 case I40IW_CM_STATE_ESTABLISHED:
3023 case I40IW_CM_STATE_ACCEPTING:
3024 case I40IW_CM_STATE_MPAREQ_SENT:
3025 case I40IW_CM_STATE_MPAREQ_RCVD:
3026 i40iw_cleanup_retrans_entry(cm_node);
3027 i40iw_send_reset(cm_node);
3028 break;
3029 case I40IW_CM_STATE_CLOSE_WAIT:
3030 cm_node->state = I40IW_CM_STATE_LAST_ACK;
3031 i40iw_send_fin(cm_node);
3032 break;
3033 case I40IW_CM_STATE_FIN_WAIT1:
3034 case I40IW_CM_STATE_FIN_WAIT2:
3035 case I40IW_CM_STATE_LAST_ACK:
3036 case I40IW_CM_STATE_TIME_WAIT:
3037 case I40IW_CM_STATE_CLOSING:
3038 ret = -1;
3039 break;
3040 case I40IW_CM_STATE_LISTENING:
3041 i40iw_cleanup_retrans_entry(cm_node);
3042 i40iw_send_reset(cm_node);
3043 break;
3044 case I40IW_CM_STATE_MPAREJ_RCVD:
3045 case I40IW_CM_STATE_UNKNOWN:
3046 case I40IW_CM_STATE_INITED:
3047 case I40IW_CM_STATE_CLOSED:
3048 case I40IW_CM_STATE_LISTENER_DESTROYED:
3049 i40iw_rem_ref_cm_node(cm_node);
3050 break;
3051 case I40IW_CM_STATE_OFFLOADED:
3052 if (cm_node->send_entry)
3053 i40iw_pr_err("send_entry\n");
3054 i40iw_rem_ref_cm_node(cm_node);
3055 break;
3056 }
3057 return ret;
3058 }
3059
3060 /**
3061 * i40iw_receive_ilq - recv an ETHERNET packet, and process it
3062 * through CM
3063 * @vsi: pointer to the vsi structure
3064 * @rbuf: receive buffer
3065 */
3066 void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)
3067 {
3068 struct i40iw_cm_node *cm_node;
3069 struct i40iw_cm_listener *listener;
3070 struct iphdr *iph;
3071 struct ipv6hdr *ip6h;
3072 struct tcphdr *tcph;
3073 struct i40iw_cm_info cm_info;
3074 struct i40iw_sc_dev *dev = vsi->dev;
3075 struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
3076 struct i40iw_cm_core *cm_core = &iwdev->cm_core;
3077 struct vlan_ethhdr *ethh;
3078 u16 vtag;
3079
3080 /* if vlan, then maclen = 18 else 14 */
3081 iph = (struct iphdr *)rbuf->iph;
3082 memset(&cm_info, 0, sizeof(cm_info));
3083
3084 i40iw_debug_buf(dev,
3085 I40IW_DEBUG_ILQ,
3086 "RECEIVE ILQ BUFFER",
3087 rbuf->mem.va,
3088 rbuf->totallen);
3089 ethh = (struct vlan_ethhdr *)rbuf->mem.va;
3090
3091 if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
3092 vtag = ntohs(ethh->h_vlan_TCI);
3093 cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3094 cm_info.vlan_id = vtag & VLAN_VID_MASK;
3095 i40iw_debug(cm_core->dev,
3096 I40IW_DEBUG_CM,
3097 "%s vlan_id=%d\n",
3098 __func__,
3099 cm_info.vlan_id);
3100 } else {
3101 cm_info.vlan_id = I40IW_NO_VLAN;
3102 }
3103 tcph = (struct tcphdr *)rbuf->tcph;
3104
3105 if (rbuf->ipv4) {
3106 cm_info.loc_addr[0] = ntohl(iph->daddr);
3107 cm_info.rem_addr[0] = ntohl(iph->saddr);
3108 cm_info.ipv4 = true;
3109 cm_info.tos = iph->tos;
3110 } else {
3111 ip6h = (struct ipv6hdr *)rbuf->iph;
3112 i40iw_copy_ip_ntohl(cm_info.loc_addr,
3113 ip6h->daddr.in6_u.u6_addr32);
3114 i40iw_copy_ip_ntohl(cm_info.rem_addr,
3115 ip6h->saddr.in6_u.u6_addr32);
3116 cm_info.ipv4 = false;
3117 cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
3118 }
3119 cm_info.loc_port = ntohs(tcph->dest);
3120 cm_info.rem_port = ntohs(tcph->source);
3121 cm_node = i40iw_find_node(cm_core,
3122 cm_info.rem_port,
3123 cm_info.rem_addr,
3124 cm_info.loc_port,
3125 cm_info.loc_addr,
3126 true);
3127
3128 if (!cm_node) {
3129 /* the only packet accepted without an existing cm_node */
3130 /* is a SYN for a passive open */
3131 if (!tcph->syn || tcph->ack)
3132 return;
3133 listener =
3134 i40iw_find_listener(cm_core,
3135 cm_info.loc_addr,
3136 cm_info.loc_port,
3137 cm_info.vlan_id,
3138 I40IW_CM_LISTENER_ACTIVE_STATE);
3139 if (!listener) {
3140 cm_info.cm_id = NULL;
3141 i40iw_debug(cm_core->dev,
3142 I40IW_DEBUG_CM,
3143 "%s no listener found\n",
3144 __func__);
3145 return;
3146 }
3147 cm_info.cm_id = listener->cm_id;
3148 cm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener);
3149 if (!cm_node) {
3150 i40iw_debug(cm_core->dev,
3151 I40IW_DEBUG_CM,
3152 "%s allocate node failed\n",
3153 __func__);
3154 atomic_dec(&listener->ref_count);
3155 return;
3156 }
3157 if (!tcph->rst && !tcph->fin) {
3158 cm_node->state = I40IW_CM_STATE_LISTENING;
3159 } else {
3160 i40iw_rem_ref_cm_node(cm_node);
3161 return;
3162 }
3163 atomic_inc(&cm_node->ref_count);
3164 } else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) {
3165 i40iw_rem_ref_cm_node(cm_node);
3166 return;
3167 }
3168 i40iw_process_packet(cm_node, rbuf);
3169 i40iw_rem_ref_cm_node(cm_node);
3170 }
3171
3172 /**
3173 * i40iw_setup_cm_core - allocate a top level instance of a cm
3174 * core
3175 * @iwdev: iwarp device structure
3176 */
3177 void i40iw_setup_cm_core(struct i40iw_device *iwdev)
3178 {
3179 struct i40iw_cm_core *cm_core = &iwdev->cm_core;
3180
3181 cm_core->iwdev = iwdev;
3182 cm_core->dev = &iwdev->sc_dev;
3183
3184 INIT_LIST_HEAD(&cm_core->connected_nodes);
3185 INIT_LIST_HEAD(&cm_core->listen_nodes);
3186
3187 setup_timer(&cm_core->tcp_timer, i40iw_cm_timer_tick,
3188 (unsigned long)cm_core);
3189
3190 spin_lock_init(&cm_core->ht_lock);
3191 spin_lock_init(&cm_core->listen_list_lock);
3192
3193 cm_core->event_wq = alloc_ordered_workqueue("iwewq",
3194 WQ_MEM_RECLAIM);
3195
3196 cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
3197 WQ_MEM_RECLAIM);
3198 }
3199
3200 /**
3201 * i40iw_cleanup_cm_core - deallocate a top level instance of a
3202 * cm core
3203 * @cm_core: cm's core
3204 */
3205 void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core)
3206 {
3207 unsigned long flags;
3208
3209 if (!cm_core)
3210 return;
3211
3212 spin_lock_irqsave(&cm_core->ht_lock, flags);
3213 if (timer_pending(&cm_core->tcp_timer))
3214 del_timer_sync(&cm_core->tcp_timer);
3215 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
3216
3217 destroy_workqueue(cm_core->event_wq);
3218 destroy_workqueue(cm_core->disconn_wq);
3219 }
3220
3221 /**
3222 * i40iw_init_tcp_ctx - setup qp context
3223 * @cm_node: connection's node
3224 * @tcp_info: offload info for tcp
3225 * @iwqp: associate qp for the connection
3226 */
3227 static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
3228 struct i40iw_tcp_offload_info *tcp_info,
3229 struct i40iw_qp *iwqp)
3230 {
3231 tcp_info->ipv4 = cm_node->ipv4;
3232 tcp_info->drop_ooo_seg = true;
3233 tcp_info->wscale = true;
3234 tcp_info->ignore_tcp_opt = true;
3235 tcp_info->ignore_tcp_uns_opt = true;
3236 tcp_info->no_nagle = false;
3237
3238 tcp_info->ttl = I40IW_DEFAULT_TTL;
3239 tcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR);
3240 tcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH);
3241 tcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH;
3242
3243 tcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED;
3244 tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
3245 tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;
3246
3247 tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
3248 tcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
3249 tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
3250 tcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
3251
3252 tcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
3253 tcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);
3254 tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
3255 tcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
3256 tcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
3257 tcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
3258 cm_node->tcp_cntxt.rcv_wscale);
3259
3260 tcp_info->flow_label = 0;
3261 tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
3262 if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
3263 tcp_info->insert_vlan_tag = true;
3264 tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id);
3265 }
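	/* IPv4 uses only the low address word (addr3); IPv6 fills
	 * all four words
	 */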
3266 if (cm_node->ipv4) {
3267 tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
3268 tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
3269
3270 tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]);
3271 tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]);
3272 tcp_info->arp_idx =
3273 cpu_to_le16((u16)i40iw_arp_table(
3274 iwqp->iwdev,
3275 &tcp_info->dest_ip_addr3,
3276 true,
3277 NULL,
3278 I40IW_ARP_RESOLVE));
3279 } else {
3280 tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
3281 tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
3282 tcp_info->dest_ip_addr0 = cpu_to_le32(cm_node->rem_addr[0]);
3283 tcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]);
3284 tcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]);
3285 tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]);
3286 tcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]);
3287 tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]);
3288 tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]);
3289 tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]);
3290 tcp_info->arp_idx =
3291 cpu_to_le16((u16)i40iw_arp_table(
3292 iwqp->iwdev,
3293 &tcp_info->dest_ip_addr0,
3294 false,
3295 NULL,
3296 I40IW_ARP_RESOLVE));
3297 }
3298 }
3299
3300 /**
3301 * i40iw_cm_init_tsa_conn - setup qp for RTS
3302 * @iwqp: associate qp for the connection
3303 * @cm_node: connection's node
3304 */
3305 static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
3306 struct i40iw_cm_node *cm_node)
3307 {
3308 struct i40iw_tcp_offload_info tcp_info;
3309 struct i40iwarp_offload_info *iwarp_info;
3310 struct i40iw_qp_host_ctx_info *ctx_info;
3311 struct i40iw_device *iwdev = iwqp->iwdev;
3312 struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
3313
3314 memset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info));
3315 iwarp_info = &iwqp->iwarp_info;
3316 ctx_info = &iwqp->ctx_info;
3317
3318 ctx_info->tcp_info = &tcp_info;
3319 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
3320 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
3321
3322 iwarp_info->ord_size = cm_node->ord_size;
3323 iwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size);
3324
3325 if (iwarp_info->ord_size == 1)
3326 iwarp_info->ord_size = 2;
3327
3328 iwarp_info->rd_enable = true;
3329 iwarp_info->rdmap_ver = 1;
3330 iwarp_info->ddp_ver = 1;
3331
3332 iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;
3333
3334 ctx_info->tcp_info_valid = true;
3335 ctx_info->iwarp_info_valid = true;
3336 ctx_info->add_to_qoslist = true;
3337 ctx_info->user_pri = cm_node->user_pri;
3338
3339 i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
3340 if (cm_node->snd_mark_en) {
3341 iwarp_info->snd_mark_en = true;
3342 iwarp_info->snd_mark_offset = (tcp_info.snd_nxt &
3343 SNDMARKER_SEQNMASK) + cm_node->lsmm_size;
3344 }
3345
3346 cm_node->state = I40IW_CM_STATE_OFFLOADED;
3347 tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
3348 tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
3349 tcp_info.tos = cm_node->tos;
3350
3351 dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);
3352
3353 /* once tcp_info is set, no need to do it again */
3354 ctx_info->tcp_info_valid = false;
3355 ctx_info->iwarp_info_valid = false;
3356 ctx_info->add_to_qoslist = false;
3357 }
3358
3359 /**
3360 * i40iw_cm_disconn - schedule disconnect work when a connection is being closed
3361 * @iwqp: associate qp for the connection
3362 */
3363 void i40iw_cm_disconn(struct i40iw_qp *iwqp)
3364 {
3365 struct disconn_work *work;
3366 struct i40iw_device *iwdev = iwqp->iwdev;
3367 struct i40iw_cm_core *cm_core = &iwdev->cm_core;
3368 unsigned long flags;
3369
3370 work = kzalloc(sizeof(*work), GFP_ATOMIC);
3371 if (!work)
3372 return; /* Timer will clean up */
3373
3374 spin_lock_irqsave(&iwdev->qptable_lock, flags);
3375 if (!iwdev->qp_table[iwqp->ibqp.qp_num]) {
3376 spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
3377 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
3378 "%s qp_id %d is already freed\n",
3379 __func__, iwqp->ibqp.qp_num);
3380 kfree(work);
3381 return;
3382 }
3383 i40iw_add_ref(&iwqp->ibqp);
3384 spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
3385
3386 work->iwqp = iwqp;
3387 INIT_WORK(&work->work, i40iw_disconnect_worker);
3388 queue_work(cm_core->disconn_wq, &work->work);
3389 return;
3390 }
3391
3392 /**
3393 * i40iw_qp_disconnect - free qp and close cm
3394 * @iwqp: associate qp for the connection
3395 */
3396 static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
3397 {
3398 struct i40iw_device *iwdev;
3399 struct i40iw_ib_device *iwibdev;
3400
3401 iwdev = to_iwdev(iwqp->ibqp.device);
3402 if (!iwdev) {
3403 i40iw_pr_err("iwdev == NULL\n");
3404 return;
3405 }
3406
3407 iwibdev = iwdev->iwibdev;
3408
3409 if (iwqp->active_conn) {
3410 /* indicate this connection is NOT active */
3411 iwqp->active_conn = 0;
3412 } else {
3413 /* Need to free the Last Streaming Mode Message */
3414 if (iwqp->ietf_mem.va) {
3415 if (iwqp->lsmm_mr)
3416 iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr);
3417 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
3418 }
3419 }
3420
3421 /* close the CM node down if it is still active */
3422 if (iwqp->cm_node) {
3423 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "%s Call close API\n", __func__);
3424 i40iw_cm_close(iwqp->cm_node);
3425 }
3426 }
3427
3428 /**
3429 * i40iw_cm_disconn_true - called by worker thread to disconnect qp
3430 * @iwqp: associate qp for the connection
3431 */
3432 static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
3433 {
3434 struct iw_cm_id *cm_id;
3435 struct i40iw_device *iwdev;
3436 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
3437 u16 last_ae;
3438 u8 original_hw_tcp_state;
3439 u8 original_ibqp_state;
3440 int disconn_status = 0;
3441 int issue_disconn = 0;
3442 int issue_close = 0;
3443 int issue_flush = 0;
3444 struct ib_event ibevent;
3445 unsigned long flags;
3446 int ret;
3447
3448 if (!iwqp) {
3449 i40iw_pr_err("iwqp == NULL\n");
3450 return;
3451 }
3452
3453 spin_lock_irqsave(&iwqp->lock, flags);
3454 cm_id = iwqp->cm_id;
3455 /* make sure we haven't already closed this connection */
3456 if (!cm_id) {
3457 spin_unlock_irqrestore(&iwqp->lock, flags);
3458 return;
3459 }
3460
3461 iwdev = to_iwdev(iwqp->ibqp.device);
3462
3463 original_hw_tcp_state = iwqp->hw_tcp_state;
3464 original_ibqp_state = iwqp->ibqp_state;
3465 last_ae = iwqp->last_aeq;
3466
3467 if (qp->term_flags) {
3468 issue_disconn = 1;
3469 issue_close = 1;
3470 iwqp->cm_id = NULL;
3471 /* When the terminate timer expires after cm_timer, don't let
3472 * the terminate handler issue cm_disconn, which can re-free
3473 * a QP even after its refcnt reaches 0.
3474 */
3475 i40iw_terminate_del_timer(qp);
3476 if (!iwqp->flush_issued) {
3477 iwqp->flush_issued = 1;
3478 issue_flush = 1;
3479 }
3480 } else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) ||
3481 ((original_ibqp_state == IB_QPS_RTS) &&
3482 (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
3483 issue_disconn = 1;
3484 if (last_ae == I40IW_AE_LLP_CONNECTION_RESET)
3485 disconn_status = -ECONNRESET;
3486 }
3487
3488 if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
3489 (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
3490 (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
3491 (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
3492 issue_close = 1;
3493 iwqp->cm_id = NULL;
3494 if (!iwqp->flush_issued) {
3495 iwqp->flush_issued = 1;
3496 issue_flush = 1;
3497 }
3498 }
3499
3500 spin_unlock_irqrestore(&iwqp->lock, flags);
3501 if (issue_flush && !iwqp->destroyed) {
3502 /* Flush the queues */
3503 i40iw_flush_wqes(iwdev, iwqp);
3504
3505 if (qp->term_flags && iwqp->ibqp.event_handler) {
3506 ibevent.device = iwqp->ibqp.device;
3507 ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
3508 IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
3509 ibevent.element.qp = &iwqp->ibqp;
3510 iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
3511 }
3512 }
3513
3514 if (cm_id && cm_id->event_handler) {
3515 if (issue_disconn) {
3516 ret = i40iw_send_cm_event(NULL,
3517 cm_id,
3518 IW_CM_EVENT_DISCONNECT,
3519 disconn_status);
3520
3521 if (ret)
3522 i40iw_debug(&iwdev->sc_dev,
3523 I40IW_DEBUG_CM,
3524 "disconnect event failed %s: - cm_id = %p\n",
3525 __func__, cm_id);
3526 }
3527 if (issue_close) {
3528 i40iw_qp_disconnect(iwqp);
3529 cm_id->provider_data = iwqp;
3530 ret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0);
3531 if (ret)
3532 i40iw_debug(&iwdev->sc_dev,
3533 I40IW_DEBUG_CM,
3534 "close event failed %s: - cm_id = %p\n",
3535 __func__, cm_id);
3536 cm_id->rem_ref(cm_id);
3537 }
3538 }
3539 }
3540
3541 /**
3542 * i40iw_disconnect_worker - worker for connection close
3543 * @work: pointer to disconn_work structure
3544 */
3545 static void i40iw_disconnect_worker(struct work_struct *work)
3546 {
3547 struct disconn_work *dwork = container_of(work, struct disconn_work, work);
3548 struct i40iw_qp *iwqp = dwork->iwqp;
3549
3550 kfree(dwork);
3551 i40iw_cm_disconn_true(iwqp);
3552 i40iw_rem_ref(&iwqp->ibqp);
3553 }
3554
3555 /**
3556 * i40iw_accept - registered call for connection to be accepted
3557 * @cm_id: cm information for passive connection
3558 * @conn_param: accept parameters
3559 */
3560 int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3561 {
3562 struct ib_qp *ibqp;
3563 struct i40iw_qp *iwqp;
3564 struct i40iw_device *iwdev;
3565 struct i40iw_sc_dev *dev;
3566 struct i40iw_cm_node *cm_node;
3567 struct ib_qp_attr attr;
3568 int passive_state;
3569 struct ib_mr *ibmr;
3570 struct i40iw_pd *iwpd;
3571 u16 buf_len = 0;
3572 struct i40iw_kmem_info accept;
3573 enum i40iw_status_code status;
3574 u64 tagged_offset;
3575
3576 memset(&attr, 0, sizeof(attr));
3577 ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
3578 if (!ibqp)
3579 return -EINVAL;
3580
3581 iwqp = to_iwqp(ibqp);
3582 iwdev = iwqp->iwdev;
3583 dev = &iwdev->sc_dev;
3584 cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
3585
3586 if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
3587 cm_node->ipv4 = true;
3588 cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
3589 } else {
3590 cm_node->ipv4 = false;
3591 i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id, NULL);
3592 }
3593 i40iw_debug(cm_node->dev,
3594 I40IW_DEBUG_CM,
3595 "Accept vlan_id=%d\n",
3596 cm_node->vlan_id);
3597 if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
3598 if (cm_node->loopbackpartner)
3599 i40iw_rem_ref_cm_node(cm_node->loopbackpartner);
3600 i40iw_rem_ref_cm_node(cm_node);
3601 return -EINVAL;
3602 }
3603
3604 passive_state = atomic_add_return(1, &cm_node->passive_state);
3605 if (passive_state == I40IW_SEND_RESET_EVENT) {
3606 i40iw_rem_ref_cm_node(cm_node);
3607 return -ECONNRESET;
3608 }
3609
3610 cm_node->cm_core->stats_accepts++;
3611 iwqp->cm_node = (void *)cm_node;
3612 cm_node->iwqp = iwqp;
3613
3614 buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE;
3615
3616 status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);
3617
3618 if (status)
3619 return -ENOMEM;
3620 cm_node->pdata.size = conn_param->private_data_len;
3621 accept.addr = iwqp->ietf_mem.va;
3622 accept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
3623 memcpy(accept.addr + accept.size, conn_param->private_data,
3624 conn_param->private_data_len);
3625
3626 /* setup our first outgoing iWarp send WQE (the IETF frame response) */
3627 if ((cm_node->ipv4 &&
3628 !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
3629 (!cm_node->ipv4 &&
3630 !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) {
3631 iwpd = iwqp->iwpd;
3632 tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
3633 ibmr = i40iw_reg_phys_mr(&iwpd->ibpd,
3634 iwqp->ietf_mem.pa,
3635 buf_len,
3636 IB_ACCESS_LOCAL_WRITE,
3637 &tagged_offset);
3638 if (IS_ERR(ibmr)) {
3639 i40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem);
3640 return -ENOMEM;
3641 }
3642
3643 ibmr->pd = &iwpd->ibpd;
3644 ibmr->device = iwpd->ibpd.device;
3645 iwqp->lsmm_mr = ibmr;
3646 if (iwqp->page)
3647 iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
3648 dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,
3649 iwqp->ietf_mem.va,
3650 (accept.size + conn_param->private_data_len),
3651 ibmr->lkey);
3652
3653 } else {
3654 if (iwqp->page)
3655 iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
3656 dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
3657 }
3658
3659 if (iwqp->page)
3660 kunmap(iwqp->page);
3661
3662 iwqp->cm_id = cm_id;
3663 cm_node->cm_id = cm_id;
3664
3665 cm_id->provider_data = (void *)iwqp;
3666 iwqp->active_conn = 0;
3667
3668 cm_node->lsmm_size = accept.size + conn_param->private_data_len;
3669 i40iw_cm_init_tsa_conn(iwqp, cm_node);
3670 cm_id->add_ref(cm_id);
3671 i40iw_add_ref(&iwqp->ibqp);
3672
3673 i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
3674
3675 attr.qp_state = IB_QPS_RTS;
3676 cm_node->qhash_set = false;
3677 i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
3678 if (cm_node->loopbackpartner) {
3679 cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
3680
3681 /* copy entire MPA frame to our cm_node's frame */
3682 memcpy(cm_node->loopbackpartner->pdata_buf,
3683 conn_param->private_data,
3684 conn_param->private_data_len);
3685 i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
3686 }
3687
3688 cm_node->accelerated = 1;
3689 if (cm_node->accept_pend) {
3690 if (!cm_node->listener)
3691 i40iw_pr_err("cm_node->listener NULL for passive node\n");
3692 atomic_dec(&cm_node->listener->pend_accepts_cnt);
3693 cm_node->accept_pend = 0;
3694 }
3695 return 0;
3696 }
3697
3698 /**
3699 * i40iw_reject - registered call for connection to be rejected
3700 * @cm_id: cm information for passive connection
3701 * @pdata: private data to be sent
3702 * @pdata_len: private data length
3703 */
3704 int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
3705 {
3706 struct i40iw_device *iwdev;
3707 struct i40iw_cm_node *cm_node;
3708 struct i40iw_cm_node *loopback;
3709
3710 cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
3711 loopback = cm_node->loopbackpartner;
3712 cm_node->cm_id = cm_id;
3713 cm_node->pdata.size = pdata_len;
3714
3715 iwdev = to_iwdev(cm_id->device);
3716 if (!iwdev)
3717 return -EINVAL;
3718 cm_node->cm_core->stats_rejects++;
3719
3720 if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER)
3721 return -EINVAL;
3722
3723 if (loopback) {
3724 memcpy(&loopback->pdata_buf, pdata, pdata_len);
3725 loopback->pdata.size = pdata_len;
3726 }
3727
3728 return i40iw_cm_reject(cm_node, pdata, pdata_len);
3729 }
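
/*
 * Example (illustrative sketch, not part of the driver): a ULP rejects a
 * pending request with iw_cm_reject(), which lands in i40iw_reject() above.
 * Note the size check there: pdata_len plus the MPA v2 header must fit in
 * MAX_CM_BUFFER or the reject fails with -EINVAL. The reason string below
 * is hypothetical.
 */
static int __maybe_unused example_reject_request(struct iw_cm_id *cm_id)
{
	static const char reason[] = "no resources";

	return iw_cm_reject(cm_id, reason, sizeof(reason));
}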
3730
3731 /**
3732 * i40iw_connect - registered call for connection to be established
3733 * @cm_id: cm information for active connection
3734 * @conn_param: information about the connection
3735 */
3736 int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3737 {
3738 struct ib_qp *ibqp;
3739 struct i40iw_qp *iwqp;
3740 struct i40iw_device *iwdev;
3741 struct i40iw_cm_node *cm_node;
3742 struct i40iw_cm_info cm_info;
3743 struct sockaddr_in *laddr;
3744 struct sockaddr_in *raddr;
3745 struct sockaddr_in6 *laddr6;
3746 struct sockaddr_in6 *raddr6;
3747 bool qhash_set = false;
3748 bool apbvt_set = false;
3749 int err = 0;
3750 enum i40iw_status_code status;
3751
3752 ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
3753 if (!ibqp)
3754 return -EINVAL;
3755 iwqp = to_iwqp(ibqp);
3756 if (!iwqp)
3757 return -EINVAL;
3758 iwdev = to_iwdev(iwqp->ibqp.device);
3759 if (!iwdev)
3760 return -EINVAL;
3761
3762 laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
3763 raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
3764 laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
3765 raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
3766
3767 if (!(laddr->sin_port) || !(raddr->sin_port))
3768 return -EINVAL;
3769
3770 iwqp->active_conn = 1;
3771 iwqp->cm_id = NULL;
3772 cm_id->provider_data = iwqp;
3773
3774 /* set up the connection params for the node */
3775 if (cm_id->remote_addr.ss_family == AF_INET) {
3776 cm_info.ipv4 = true;
3777 memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
3778 memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
3779 cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
3780 cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
3781 cm_info.loc_port = ntohs(laddr->sin_port);
3782 cm_info.rem_port = ntohs(raddr->sin_port);
3783 cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
3784 } else {
3785 cm_info.ipv4 = false;
3786 i40iw_copy_ip_ntohl(cm_info.loc_addr,
3787 laddr6->sin6_addr.in6_u.u6_addr32);
3788 i40iw_copy_ip_ntohl(cm_info.rem_addr,
3789 raddr6->sin6_addr.in6_u.u6_addr32);
3790 cm_info.loc_port = ntohs(laddr6->sin6_port);
3791 cm_info.rem_port = ntohs(raddr6->sin6_port);
3792 i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
3793 }
3794 cm_info.cm_id = cm_id;
3795 cm_info.tos = cm_id->tos;
3796 cm_info.user_pri = rt_tos2priority(cm_id->tos);
3797 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
3798 __func__, cm_id->tos, cm_info.user_pri);
3799 if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
3800 (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
3801 raddr6->sin6_addr.in6_u.u6_addr32,
3802 sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
3803 status = i40iw_manage_qhash(iwdev,
3804 &cm_info,
3805 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3806 I40IW_QHASH_MANAGE_TYPE_ADD,
3807 NULL,
3808 true);
3809 if (status)
3810 return -EINVAL;
3811 qhash_set = true;
3812 }
3813 status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
3814 if (status) {
3815 i40iw_manage_qhash(iwdev,
3816 &cm_info,
3817 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3818 I40IW_QHASH_MANAGE_TYPE_DELETE,
3819 NULL,
3820 false);
3821 return -EINVAL;
3822 }
3823
3824 apbvt_set = true;
3825 cm_id->add_ref(cm_id);
3826 cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
3827 conn_param->private_data_len,
3828 (void *)conn_param->private_data,
3829 &cm_info);
3830
3831 if (IS_ERR(cm_node)) {
3832 err = PTR_ERR(cm_node);
3833 goto err_out;
3834 }
3835
3836 i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
3837 if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
3838 !cm_node->ord_size)
3839 cm_node->ord_size = 1;
3840
3841 cm_node->apbvt_set = apbvt_set;
3842 cm_node->qhash_set = qhash_set;
3843 iwqp->cm_node = cm_node;
3844 cm_node->iwqp = iwqp;
3845 iwqp->cm_id = cm_id;
3846 i40iw_add_ref(&iwqp->ibqp);
3847
3848 if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
3849 cm_node->state = I40IW_CM_STATE_SYN_SENT;
3850 err = i40iw_send_syn(cm_node, 0);
3851 if (err) {
3852 i40iw_rem_ref_cm_node(cm_node);
3853 goto err_out;
3854 }
3855 }
3856
3857 i40iw_debug(cm_node->dev,
3858 I40IW_DEBUG_CM,
3859 "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
3860 cm_node->rem_port,
3861 cm_node,
3862 cm_node->cm_id);
3863 return 0;
3864
3865 err_out:
3866 if (cm_info.ipv4)
3867 i40iw_debug(&iwdev->sc_dev,
3868 I40IW_DEBUG_CM,
3869 "Api - connect() FAILED: dest addr=%pI4",
3870 cm_info.rem_addr);
3871 else
3872 i40iw_debug(&iwdev->sc_dev,
3873 I40IW_DEBUG_CM,
3874 "Api - connect() FAILED: dest addr=%pI6",
3875 cm_info.rem_addr);
3876
3877 if (qhash_set)
3878 i40iw_manage_qhash(iwdev,
3879 &cm_info,
3880 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3881 I40IW_QHASH_MANAGE_TYPE_DELETE,
3882 NULL,
3883 false);
3884
3885 if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
3886 cm_info.loc_port))
3887 i40iw_manage_apbvt(iwdev,
3888 cm_info.loc_port,
3889 I40IW_MANAGE_APBVT_DEL);
3890 cm_id->rem_ref(cm_id);
3891 iwdev->cm_core.stats_connect_errs++;
3892 return err;
3893 }
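
/*
 * Example (illustrative sketch, not part of the driver): the active side
 * reaches i40iw_connect() above through iw_cm_connect(). The cm_id is
 * assumed to have been created with iw_create_cm_id() and to carry valid
 * local and remote sockaddrs with nonzero ports (i40iw_connect() rejects a
 * zero port); the QP number and IRD/ORD values below are hypothetical.
 */
static int __maybe_unused example_active_connect(struct iw_cm_id *cm_id, u32 qpn)
{
	struct iw_cm_conn_param conn_param = {};

	conn_param.qpn = qpn;	/* QP created by the caller beforehand */
	conn_param.ird = 1;
	conn_param.ord = 1;
	/* no private data sent in this sketch */

	return iw_cm_connect(cm_id, &conn_param);
}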
3894
3895 /**
3896 * i40iw_create_listen - registered call creating listener
3897 * @cm_id: cm information for the listener
3898 * @backlog: maximum number of pending accepts on the listener
3899 */
3900 int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
3901 {
3902 struct i40iw_device *iwdev;
3903 struct i40iw_cm_listener *cm_listen_node;
3904 struct i40iw_cm_info cm_info;
3905 enum i40iw_status_code ret;
3906 struct sockaddr_in *laddr;
3907 struct sockaddr_in6 *laddr6;
3908 bool wildcard = false;
3909
3910 iwdev = to_iwdev(cm_id->device);
3911 if (!iwdev)
3912 return -EINVAL;
3913
3914 laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
3915 laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
3916 memset(&cm_info, 0, sizeof(cm_info));
3917 if (laddr->sin_family == AF_INET) {
3918 cm_info.ipv4 = true;
3919 cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
3920 cm_info.loc_port = ntohs(laddr->sin_port);
3921
3922 if (laddr->sin_addr.s_addr != INADDR_ANY)
3923 cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
3924 else
3925 wildcard = true;
3926
3927 } else {
3928 cm_info.ipv4 = false;
3929 i40iw_copy_ip_ntohl(cm_info.loc_addr,
3930 laddr6->sin6_addr.in6_u.u6_addr32);
3931 cm_info.loc_port = ntohs(laddr6->sin6_port);
3932 if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)
3933 i40iw_netdev_vlan_ipv6(cm_info.loc_addr,
3934 &cm_info.vlan_id,
3935 NULL);
3936 else
3937 wildcard = true;
3938 }
3939 cm_info.backlog = backlog;
3940 cm_info.cm_id = cm_id;
3941
3942 cm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info);
3943 if (!cm_listen_node) {
3944 i40iw_pr_err("cm_listen_node == NULL\n");
3945 return -ENOMEM;
3946 }
3947
3948 cm_id->provider_data = cm_listen_node;
3949
3950 cm_listen_node->tos = cm_id->tos;
3951 cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
3952 cm_info.user_pri = cm_listen_node->user_pri;
3953
3954 if (!cm_listen_node->reused_node) {
3955 if (wildcard) {
3956 if (cm_info.ipv4)
3957 ret = i40iw_add_mqh_4(iwdev,
3958 &cm_info,
3959 cm_listen_node);
3960 else
3961 ret = i40iw_add_mqh_6(iwdev,
3962 &cm_info,
3963 cm_listen_node);
3964 if (ret)
3965 goto error;
3966
3967 ret = i40iw_manage_apbvt(iwdev,
3968 cm_info.loc_port,
3969 I40IW_MANAGE_APBVT_ADD);
3970
3971 if (ret)
3972 goto error;
3973 } else {
3974 ret = i40iw_manage_qhash(iwdev,
3975 &cm_info,
3976 I40IW_QHASH_TYPE_TCP_SYN,
3977 I40IW_QHASH_MANAGE_TYPE_ADD,
3978 NULL,
3979 true);
3980 if (ret)
3981 goto error;
3982 cm_listen_node->qhash_set = true;
3983 ret = i40iw_manage_apbvt(iwdev,
3984 cm_info.loc_port,
3985 I40IW_MANAGE_APBVT_ADD);
3986 if (ret)
3987 goto error;
3988 }
3989 }
3990 cm_id->add_ref(cm_id);
3991 cm_listen_node->cm_core->stats_listen_created++;
3992 return 0;
3993 error:
3994 i40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false);
3995 return -EINVAL;
3996 }
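
/*
 * Example (illustrative sketch, not part of the driver): a listener reaches
 * i40iw_create_listen() above through iw_create_cm_id() + iw_cm_listen().
 * Binding to INADDR_ANY takes the wildcard path, which programs a qhash
 * entry per interface address via i40iw_add_mqh_4/6. The handler and port
 * below are hypothetical.
 */
static int __maybe_unused example_listen(struct ib_device *ibdev,
					 iw_cm_handler handler)
{
	struct sockaddr_in *sin;
	struct iw_cm_id *cm_id;
	int ret;

	cm_id = iw_create_cm_id(ibdev, handler, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	sin = (struct sockaddr_in *)&cm_id->local_addr;
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = htonl(INADDR_ANY);	/* wildcard listen */
	sin->sin_port = htons(4321);			/* hypothetical port */

	ret = iw_cm_listen(cm_id, 16);
	if (ret)
		iw_destroy_cm_id(cm_id);
	return ret;
}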
3997
3998 /**
3999 * i40iw_destroy_listen - registered call to destroy listener
4000 * @cm_id: cm information for the listener being destroyed
4001 */
4002 int i40iw_destroy_listen(struct iw_cm_id *cm_id)
4003 {
4004 struct i40iw_device *iwdev;
4005
4006 iwdev = to_iwdev(cm_id->device);
4007 if (cm_id->provider_data)
4008 i40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true);
4009 else
4010 i40iw_pr_err("cm_id->provider_data was NULL\n");
4011
4012 cm_id->rem_ref(cm_id);
4013
4014 return 0;
4015 }
4016
4017 /**
4018 * i40iw_cm_event_connected - handle connected active node
4019 * @event: cm event for the connected node
4020 */
4021 static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
4022 {
4023 struct i40iw_qp *iwqp;
4024 struct i40iw_device *iwdev;
4025 struct i40iw_cm_node *cm_node;
4026 struct i40iw_sc_dev *dev;
4027 struct ib_qp_attr attr;
4028 struct iw_cm_id *cm_id;
4029 int status;
4030 bool read0;
4031
4032 cm_node = event->cm_node;
4033 cm_id = cm_node->cm_id;
4034 iwqp = (struct i40iw_qp *)cm_id->provider_data;
4035 iwdev = to_iwdev(iwqp->ibqp.device);
4036 dev = &iwdev->sc_dev;
4037
4038 if (iwqp->destroyed) {
4039 status = -ETIMEDOUT;
4040 goto error;
4041 }
4042 i40iw_cm_init_tsa_conn(iwqp, cm_node);
4043 read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
4044 if (iwqp->page)
4045 iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
4046 dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
4047 if (iwqp->page)
4048 kunmap(iwqp->page);
4049 status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
4050 if (status)
4051 i40iw_pr_err("send cm event\n");
4052
4053 memset(&attr, 0, sizeof(attr));
4054 attr.qp_state = IB_QPS_RTS;
4055 cm_node->qhash_set = false;
4056 i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
4057
4058 cm_node->accelerated = 1;
4059 if (cm_node->accept_pend) {
4060 if (cm_node->listener)
4061 atomic_dec(&cm_node->listener->pend_accepts_cnt);
4062 else
4063 i40iw_pr_err("listener is null for passive node\n");
4064 cm_node->accept_pend = 0;
4064 }
4065 return;
4066
4067 error:
4068 iwqp->cm_id = NULL;
4069 cm_id->provider_data = NULL;
4070 i40iw_send_cm_event(event->cm_node,
4071 cm_id,
4072 IW_CM_EVENT_CONNECT_REPLY,
4073 status);
4074 cm_id->rem_ref(cm_id);
4075 i40iw_rem_ref_cm_node(event->cm_node);
4076 }
4077
4078 /**
4079 * i40iw_cm_event_reset - handle reset
4080 * @event: cm event for the node being reset
4081 */
4082 static void i40iw_cm_event_reset(struct i40iw_cm_event *event)
4083 {
4084 struct i40iw_cm_node *cm_node = event->cm_node;
4085 struct iw_cm_id *cm_id = cm_node->cm_id;
4086 struct i40iw_qp *iwqp;
4087
4088 if (!cm_id)
4089 return;
4090
4091 iwqp = cm_id->provider_data;
4092 if (!iwqp)
4093 return;
4094
4095 i40iw_debug(cm_node->dev,
4096 I40IW_DEBUG_CM,
4097 "reset event %p - cm_id = %p\n",
4098 event->cm_node, cm_id);
4099 iwqp->cm_id = NULL;
4100
4101 i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET);
4102 i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
4103 }
4104
4105 /**
4106 * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer
4107 * @work: work element embedded in the cm event
4108 */
4109 static void i40iw_cm_event_handler(struct work_struct *work)
4110 {
4111 struct i40iw_cm_event *event = container_of(work,
4112 struct i40iw_cm_event,
4113 event_work);
4114 struct i40iw_cm_node *cm_node;
4115
4116 if (!event || !event->cm_node || !event->cm_node->cm_core)
4117 return;
4118
4119 cm_node = event->cm_node;
4120
4121 switch (event->type) {
4122 case I40IW_CM_EVENT_MPA_REQ:
4123 i40iw_send_cm_event(cm_node,
4124 cm_node->cm_id,
4125 IW_CM_EVENT_CONNECT_REQUEST,
4126 0);
4127 break;
4128 case I40IW_CM_EVENT_RESET:
4129 i40iw_cm_event_reset(event);
4130 break;
4131 case I40IW_CM_EVENT_CONNECTED:
4132 if (!cm_node->cm_id ||
4133 (cm_node->state != I40IW_CM_STATE_OFFLOADED))
4134 break;
4135 i40iw_cm_event_connected(event);
4136 break;
4137 case I40IW_CM_EVENT_MPA_REJECT:
4138 if (!cm_node->cm_id ||
4139 (cm_node->state == I40IW_CM_STATE_OFFLOADED))
4140 break;
4141 i40iw_send_cm_event(cm_node,
4142 cm_node->cm_id,
4143 IW_CM_EVENT_CONNECT_REPLY,
4144 -ECONNREFUSED);
4145 break;
4146 case I40IW_CM_EVENT_ABORTED:
4147 if (!cm_node->cm_id ||
4148 (cm_node->state == I40IW_CM_STATE_OFFLOADED))
4149 break;
4150 i40iw_event_connect_error(event);
4151 break;
4152 default:
4153 i40iw_pr_err("event type = %d\n", event->type);
4154 break;
4155 }
4156
4157 event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
4158 i40iw_rem_ref_cm_node(event->cm_node);
4159 kfree(event);
4160 }
4161
4162 /**
4163 * i40iw_cm_post_event - queue event request for worker thread
4164 * @event: cm event to queue for the worker thread
4165 */
4166 static void i40iw_cm_post_event(struct i40iw_cm_event *event)
4167 {
4168 atomic_inc(&event->cm_node->ref_count);
4169 event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
4170 INIT_WORK(&event->event_work, i40iw_cm_event_handler);
4171
4172 queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
4173 }
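
/*
 * The two functions above implement a common kernel deferral pattern:
 * embed a work_struct in the event, take references before queuing, and
 * recover the event with container_of() in the worker before releasing
 * them. A minimal generic sketch of the same pattern follows (all names
 * here are hypothetical, not driver API):
 */
struct example_event {
	struct work_struct work;
	int type;
};

static void example_event_worker(struct work_struct *work)
{
	struct example_event *ev =
		container_of(work, struct example_event, work);

	/* dispatch on ev->type, then drop whatever was pinned at post time */
	kfree(ev);
}

static void __maybe_unused example_post_event(struct workqueue_struct *wq,
					      struct example_event *ev)
{
	INIT_WORK(&ev->work, example_event_worker);
	queue_work(wq, &ev->work);
}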
4174
4175 /**
4176 * i40iw_qhash_ctrl - enable/disable qhash for list
4177 * @iwdev: device pointer
4178 * @parent_listen_node: parent listen node
4179 * @nfo: cm info node
4180 * @ipaddr: Pointer to IPv4 or IPv6 address
4181 * @ipv4: flag indicating IPv4 when true
4182 * @ifup: flag indicating interface up when true
4183 *
4184 * Enables or disables the qhash for the child listen node that
4185 * matches ipaddr. If no matching IP is found and the interface is
4186 * coming up, a new child listen node is allocated and added to the
4187 * parent listen node. The listen_list_lock is assumed to be held
4188 * when called.
4189 */
4190 static void i40iw_qhash_ctrl(struct i40iw_device *iwdev,
4191 struct i40iw_cm_listener *parent_listen_node,
4192 struct i40iw_cm_info *nfo,
4193 u32 *ipaddr, bool ipv4, bool ifup)
4194 {
4195 struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
4196 struct i40iw_cm_listener *child_listen_node;
4197 struct list_head *pos, *tpos;
4198 enum i40iw_status_code ret;
4199 bool node_allocated = false;
4200 enum i40iw_quad_hash_manage_type op =
4201 ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
4202
4203 list_for_each_safe(pos, tpos, child_listen_list) {
4204 child_listen_node =
4205 list_entry(pos,
4206 struct i40iw_cm_listener,
4207 child_listen_list);
4208 if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
4209 goto set_qhash;
4210 }
4211
4212 /* if not found then add a child listener if interface is going up */
4213 if (!ifup)
4214 return;
4215 child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
4216 if (!child_listen_node)
4217 return;
4218 node_allocated = true;
4219 memcpy(child_listen_node, parent_listen_node, sizeof(*child_listen_node));
4220
4221 memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);
4222
4223 set_qhash:
4224 memcpy(nfo->loc_addr,
4225 child_listen_node->loc_addr,
4226 sizeof(nfo->loc_addr));
4227 nfo->vlan_id = child_listen_node->vlan_id;
4228 ret = i40iw_manage_qhash(iwdev, nfo,
4229 I40IW_QHASH_TYPE_TCP_SYN,
4230 op,
4231 NULL, false);
4232 if (!ret) {
4233 child_listen_node->qhash_set = ifup;
4234 if (node_allocated)
4235 list_add(&child_listen_node->child_listen_list,
4236 &parent_listen_node->child_listen_list);
4237 } else if (node_allocated) {
4238 kfree(child_listen_node);
4239 }
4240 }
4241
4242 /**
4243 * i40iw_cm_disconnect_all - disconnect all connected QPs
4244 * @iwdev: device pointer
4245 */
4246 void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
4247 {
4248 struct i40iw_cm_core *cm_core = &iwdev->cm_core;
4249 struct list_head *list_core_temp;
4250 struct list_head *list_node;
4251 struct i40iw_cm_node *cm_node;
4252 unsigned long flags;
4253 struct list_head connected_list;
4254 struct ib_qp_attr attr;
4255
4256 INIT_LIST_HEAD(&connected_list);
4257 spin_lock_irqsave(&cm_core->ht_lock, flags);
4258 list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
4259 cm_node = container_of(list_node, struct i40iw_cm_node, list);
4260 atomic_inc(&cm_node->ref_count);
4261 list_add(&cm_node->connected_entry, &connected_list);
4262 }
4263 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
4264
4265 list_for_each_safe(list_node, list_core_temp, &connected_list) {
4266 cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
4267 attr.qp_state = IB_QPS_ERR;
4268 i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
4269 i40iw_rem_ref_cm_node(cm_node);
4270 }
4271 }
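
/*
 * i40iw_cm_disconnect_all() above uses a common pattern: pin each node and
 * move it to a private list while holding the spinlock, then do the work
 * that may sleep (the QP modify) with the lock dropped. A generic sketch
 * of the same pattern (all types and names here are hypothetical):
 */
struct example_node {
	struct list_head list;		/* shared-table linkage */
	struct list_head tmp_entry;	/* private-list linkage */
	atomic_t ref_count;
};

static void __maybe_unused example_drain_table(spinlock_t *lock,
					       struct list_head *table)
{
	struct example_node *node, *tmp;
	unsigned long flags;
	LIST_HEAD(local);

	spin_lock_irqsave(lock, flags);
	list_for_each_entry(node, table, list) {
		atomic_inc(&node->ref_count);	/* keep node alive unlocked */
		list_add(&node->tmp_entry, &local);
	}
	spin_unlock_irqrestore(lock, flags);

	list_for_each_entry_safe(node, tmp, &local, tmp_entry) {
		/* teardown work that may sleep goes here */
		if (atomic_dec_and_test(&node->ref_count))
			kfree(node);
	}
}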
4272
4273 /**
4274 * i40iw_if_notify - process an ifup/ifdown on an interface
4275 * @iwdev: device pointer
4276 * @netdev: network device on which the address event occurred
4277 * @ipaddr: pointer to IPv4 or IPv6 address
4278 * @ipv4: flag indicating IPv4 when true
4279 * @ifup: flag indicating interface up when true
4279 */
4280 void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
4281 u32 *ipaddr, bool ipv4, bool ifup)
4282 {
4283 struct i40iw_cm_core *cm_core = &iwdev->cm_core;
4284 unsigned long flags;
4285 struct i40iw_cm_listener *listen_node;
4286 static const u32 ip_zero[4] = { 0, 0, 0, 0 };
4287 struct i40iw_cm_info nfo;
4288 u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
4289 enum i40iw_status_code ret;
4290 enum i40iw_quad_hash_manage_type op =
4291 ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
4292
4293 /* Disable or enable qhash for listeners */
4294 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
4295 list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
4296 if (vlan_id == listen_node->vlan_id &&
4297 (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) ||
4298 !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) {
4299 memcpy(nfo.loc_addr, listen_node->loc_addr,
4300 sizeof(nfo.loc_addr));
4301 nfo.loc_port = listen_node->loc_port;
4302 nfo.ipv4 = listen_node->ipv4;
4303 nfo.vlan_id = listen_node->vlan_id;
4304 nfo.user_pri = listen_node->user_pri;
4305 if (!list_empty(&listen_node->child_listen_list)) {
4306 i40iw_qhash_ctrl(iwdev,
4307 listen_node,
4308 &nfo,
4309 ipaddr, ipv4, ifup);
4310 } else if (memcmp(listen_node->loc_addr, ip_zero,
4311 ipv4 ? 4 : 16)) {
4312 ret = i40iw_manage_qhash(iwdev,
4313 &nfo,
4314 I40IW_QHASH_TYPE_TCP_SYN,
4315 op,
4316 NULL,
4317 false);
4318 if (!ret)
4319 listen_node->qhash_set = ifup;
4320 }
4321 }
4322 }
4323 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
4324
4325 /* disconnect any connected QPs on ifdown */
4326 if (!ifup)
4327 i40iw_cm_disconnect_all(iwdev);
4328 }
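
/*
 * Example (illustrative sketch, not part of this file): i40iw_if_notify()
 * is driven from address-notifier callbacks registered elsewhere in the
 * driver (i40iw_main.c). A minimal IPv4 notifier of that shape, assuming
 * <linux/inetdevice.h> is available and using a hypothetical wrapper
 * struct to find the device:
 */
struct example_notifier {
	struct notifier_block nb;
	struct i40iw_device *iwdev;
};

static int __maybe_unused example_inetaddr_event(struct notifier_block *nb,
						 unsigned long event,
						 void *ptr)
{
	struct example_notifier *en =
		container_of(nb, struct example_notifier, nb);
	struct in_ifaddr *ifa = ptr;
	u32 local_ipaddr = ntohl(ifa->ifa_address);

	if (event != NETDEV_UP && event != NETDEV_DOWN)
		return NOTIFY_DONE;

	i40iw_if_notify(en->iwdev, ifa->ifa_dev->dev, &local_ipaddr,
			true, event == NETDEV_UP);
	return NOTIFY_DONE;
}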