/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include "i40iw.h"

/**
 * i40iw_arp_table - manage arp table
 * @iwdev: iwarp device
 * @ip_addr: ip address for device
 * @ipv4: flag indicating IPv4 when true
 * @mac_addr: mac address ptr
 * @action: add, resolve or delete
 */
int i40iw_arp_table(struct i40iw_device *iwdev,
		    u32 *ip_addr,
		    bool ipv4,
		    u8 *mac_addr,
		    u32 action)
{
	int arp_index;
	int err;
	u32 ip[4];

	if (ipv4) {
		memset(ip, 0, sizeof(ip));
		ip[0] = *ip_addr;
	} else {
		memcpy(ip, ip_addr, sizeof(ip));
	}

	for (arp_index = 0; (u32)arp_index < iwdev->arp_table_size; arp_index++)
		if (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0)
			break;
	switch (action) {
	case I40IW_ARP_ADD:
		if (arp_index != iwdev->arp_table_size)
			return -1;

		arp_index = 0;
		err = i40iw_alloc_resource(iwdev, iwdev->allocated_arps,
					   iwdev->arp_table_size,
					   (u32 *)&arp_index,
					   &iwdev->next_arp_index);

		if (err)
			return err;

		memcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip));
		ether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr);
		break;
	case I40IW_ARP_RESOLVE:
		if (arp_index == iwdev->arp_table_size)
			return -1;
		break;
	case I40IW_ARP_DELETE:
		if (arp_index == iwdev->arp_table_size)
			return -1;
		memset(iwdev->arp_table[arp_index].ip_addr, 0,
		       sizeof(iwdev->arp_table[arp_index].ip_addr));
		eth_zero_addr(iwdev->arp_table[arp_index].mac_addr);
		i40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index);
		break;
	default:
		return -1;
	}
	return arp_index;
}

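/*
 * Usage sketch (illustrative only, not part of the driver): adding,
 * resolving and deleting an entry in the driver's private ARP table via
 * i40iw_arp_table(). The address values are made up; entries are stored in
 * host byte order, and mac_addr is only dereferenced for I40IW_ARP_ADD.
 *
 *	u32 ip = 0x0a000001;	// 10.0.0.1 in host order
 *	u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *	int arp_index;
 *
 *	arp_index = i40iw_arp_table(iwdev, &ip, true, mac, I40IW_ARP_ADD);
 *	...
 *	arp_index = i40iw_arp_table(iwdev, &ip, true, NULL, I40IW_ARP_RESOLVE);
 *	...
 *	i40iw_arp_table(iwdev, &ip, true, NULL, I40IW_ARP_DELETE);
 */
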
/**
 * i40iw_wr32 - write 32 bits to hw register
 * @hw: hardware information including registers
 * @reg: register offset
 * @value: value to write to register
 */
inline void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value)
{
	writel(value, hw->hw_addr + reg);
}

/**
 * i40iw_rd32 - read a 32 bit hw register
 * @hw: hardware information including registers
 * @reg: register offset
 *
 * Return value of register content
 */
inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
{
	return readl(hw->hw_addr + reg);
}

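/*
 * Usage sketch (illustrative only): a read-modify-write of a device
 * register with the i40iw_rd32()/i40iw_wr32() pair above. The register
 * offset and bit below are hypothetical, not real i40iw definitions.
 *
 *	u32 val = i40iw_rd32(hw, EXAMPLE_REG_OFFSET);
 *
 *	val |= EXAMPLE_ENABLE_BIT;
 *	i40iw_wr32(hw, EXAMPLE_REG_OFFSET, val);
 */
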
/**
 * i40iw_inetaddr_event - system notifier for ipv4 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: interface address
 */
int i40iw_inetaddr_event(struct notifier_block *notifier,
			 unsigned long event,
			 void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *event_netdev = ifa->ifa_dev->dev;
	struct net_device *netdev;
	struct net_device *upper_dev;
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;
	u32 local_ipaddr;
	u32 action = I40IW_ARP_ADD;

	hdl = i40iw_find_netdev(event_netdev);
	if (!hdl)
		return NOTIFY_DONE;

	iwdev = &hdl->device;
	if (iwdev->init_state < INET_NOTIFIER)
		return NOTIFY_DONE;

	netdev = iwdev->ldev->netdev;
	upper_dev = netdev_master_upper_dev_get(netdev);
	if (netdev != event_netdev)
		return NOTIFY_DONE;

	if (upper_dev)
		local_ipaddr = ntohl(
			((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
	else
		local_ipaddr = ntohl(ifa->ifa_address);
	switch (event) {
	case NETDEV_DOWN:
		action = I40IW_ARP_DELETE;
		/* Fall through */
	case NETDEV_UP:
		/* Fall through */
	case NETDEV_CHANGEADDR:
		i40iw_manage_arp_cache(iwdev,
				       netdev->dev_addr,
				       &local_ipaddr,
				       true,
				       action);
		i40iw_if_notify(iwdev, netdev, &local_ipaddr, true,
				action == I40IW_ARP_ADD);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_inet6addr_event - system notifier for ipv6 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: interface address
 */
int i40iw_inet6addr_event(struct notifier_block *notifier,
			  unsigned long event,
			  void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *event_netdev = ifa->idev->dev;
	struct net_device *netdev;
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;
	u32 local_ipaddr6[4];
	u32 action = I40IW_ARP_ADD;

	hdl = i40iw_find_netdev(event_netdev);
	if (!hdl)
		return NOTIFY_DONE;

	iwdev = &hdl->device;
	if (iwdev->init_state < INET_NOTIFIER)
		return NOTIFY_DONE;

	netdev = iwdev->ldev->netdev;
	if (netdev != event_netdev)
		return NOTIFY_DONE;

	i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
	switch (event) {
	case NETDEV_DOWN:
		action = I40IW_ARP_DELETE;
		/* Fall through */
	case NETDEV_UP:
		/* Fall through */
	case NETDEV_CHANGEADDR:
		i40iw_manage_arp_cache(iwdev,
				       netdev->dev_addr,
				       local_ipaddr6,
				       false,
				       action);
		i40iw_if_notify(iwdev, netdev, local_ipaddr6, false,
				action == I40IW_ARP_ADD);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_net_event - system notifier for net events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: neighbor
 */
int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr)
{
	struct neighbour *neigh = ptr;
	struct i40iw_device *iwdev;
	struct i40iw_handler *iwhdl;
	__be32 *p;
	u32 local_ipaddr[4];

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		iwhdl = i40iw_find_netdev((struct net_device *)neigh->dev);
		if (!iwhdl)
			return NOTIFY_DONE;
		iwdev = &iwhdl->device;
		if (iwdev->init_state < INET_NOTIFIER)
			return NOTIFY_DONE;
		p = (__be32 *)neigh->primary_key;
		i40iw_copy_ip_ntohl(local_ipaddr, p);
		if (neigh->nud_state & NUD_VALID) {
			i40iw_manage_arp_cache(iwdev,
					       neigh->ha,
					       local_ipaddr,
					       false,
					       I40IW_ARP_ADD);
		} else {
			i40iw_manage_arp_cache(iwdev,
					       neigh->ha,
					       local_ipaddr,
					       false,
					       I40IW_ARP_DELETE);
		}
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_get_cqp_request - get cqp struct
 * @cqp: device cqp ptr
 * @wait: cqp to be used in wait mode
 */
struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)
{
	struct i40iw_cqp_request *cqp_request = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cqp->req_lock, flags);
	if (!list_empty(&cqp->cqp_avail_reqs)) {
		cqp_request = list_entry(cqp->cqp_avail_reqs.next,
					 struct i40iw_cqp_request, list);
		list_del_init(&cqp_request->list);
	}
	spin_unlock_irqrestore(&cqp->req_lock, flags);
	if (!cqp_request) {
		cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
		if (cqp_request) {
			cqp_request->dynamic = true;
			INIT_LIST_HEAD(&cqp_request->list);
			init_waitqueue_head(&cqp_request->waitq);
		}
	}
	if (!cqp_request) {
		i40iw_pr_err("CQP Request Fail: No Memory");
		return NULL;
	}

	if (wait) {
		atomic_set(&cqp_request->refcount, 2);
		cqp_request->waiting = true;
	} else {
		atomic_set(&cqp_request->refcount, 1);
	}
	return cqp_request;
}

/**
 * i40iw_free_cqp_request - free cqp request
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
	unsigned long flags;

	if (cqp_request->dynamic) {
		kfree(cqp_request);
	} else {
		cqp_request->request_done = false;
		cqp_request->callback_fcn = NULL;
		cqp_request->waiting = false;

		spin_lock_irqsave(&cqp->req_lock, flags);
		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
		spin_unlock_irqrestore(&cqp->req_lock, flags);
	}
	wake_up(&iwdev->close_wq);
}

/**
 * i40iw_put_cqp_request - dec ref count and free if 0
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
			   struct i40iw_cqp_request *cqp_request)
{
	if (atomic_dec_and_test(&cqp_request->refcount))
		i40iw_free_cqp_request(cqp, cqp_request);
}

/**
 * i40iw_free_pending_cqp_request - free pending cqp request objs
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
					   struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);

	if (cqp_request->waiting) {
		cqp_request->compl_info.error = true;
		cqp_request->request_done = true;
		wake_up(&cqp_request->waitq);
	}
	i40iw_put_cqp_request(cqp, cqp_request);
	wait_event_timeout(iwdev->close_wq,
			   !atomic_read(&cqp_request->refcount),
			   1000);
}

/**
 * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
 * @iwdev: iwarp device
 */
void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request = NULL;
	struct cqp_commands_info *pcmdinfo = NULL;
	u32 i, pending_work, wqe_idx;

	pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
	wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
	for (i = 0; i < pending_work; i++) {
		cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
		if (cqp_request)
			i40iw_free_pending_cqp_request(cqp, cqp_request);
		wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
	}

	while (!list_empty(&dev->cqp_cmd_head)) {
		pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
		cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
		if (cqp_request)
			i40iw_free_pending_cqp_request(cqp, cqp_request);
	}
}

/**
 * i40iw_free_qp - callback after destroy qp completes
 * @cqp_request: cqp request for destroy qp
 * @num: not used
 */
static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
{
	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
	struct i40iw_device *iwdev;
	u32 qp_num = iwqp->ibqp.qp_num;

	iwdev = iwqp->iwdev;

	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	i40iw_rem_devusecount(iwdev);
}

/**
 * i40iw_wait_event - wait for completion
 * @iwdev: iwarp device
 * @cqp_request: cqp request to wait
 */
static int i40iw_wait_event(struct i40iw_device *iwdev,
			    struct i40iw_cqp_request *cqp_request)
{
	struct cqp_commands_info *info = &cqp_request->info;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	bool cqp_error = false;
	int err_code = 0;
	int timeout_ret = 0;

	timeout_ret = wait_event_timeout(cqp_request->waitq,
					 cqp_request->request_done,
					 I40IW_EVENT_TIMEOUT);
	if (!timeout_ret) {
		i40iw_pr_err("error cqp command 0x%x timed out ret = %d\n",
			     info->cqp_cmd, timeout_ret);
		err_code = -ETIME;
		if (!iwdev->reset) {
			iwdev->reset = true;
			i40iw_request_reset(iwdev);
		}
		goto done;
	}
	cqp_error = cqp_request->compl_info.error;
	if (cqp_error) {
		i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
			     info->cqp_cmd, cqp_request->compl_info.maj_err_code,
			     cqp_request->compl_info.min_err_code);
		err_code = -EPROTO;
		goto done;
	}
done:
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return err_code;
}

/**
 * i40iw_handle_cqp_op - process cqp command
 * @iwdev: iwarp device
 * @cqp_request: cqp request to process
 */
enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
					   struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;
	struct cqp_commands_info *info = &cqp_request->info;
	int err_code = 0;

	if (iwdev->reset) {
		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
		return I40IW_ERR_CQP_COMPL_ERROR;
	}

	status = i40iw_process_cqp_cmd(dev, info);
	if (status) {
		i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
		return status;
	}
	if (cqp_request->waiting)
		err_code = i40iw_wait_event(iwdev, cqp_request);
	if (err_code)
		status = I40IW_ERR_CQP_COMPL_ERROR;
	return status;
}

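/*
 * Usage sketch (illustrative only): the CQP command pattern used by the
 * *_cmd helpers throughout this file. A request is taken from the free list
 * (or allocated), its cqp_commands_info is filled in, and
 * i40iw_handle_cqp_op() posts it; for waiting requests it then blocks until
 * completion. OP_EXAMPLE and the in.u.example union member are stand-ins,
 * not real definitions.
 *
 *	struct i40iw_cqp_request *cqp_request;
 *	struct cqp_commands_info *cqp_info;
 *	enum i40iw_status_code status;
 *
 *	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
 *	if (!cqp_request)
 *		return I40IW_ERR_NO_MEMORY;
 *	cqp_info = &cqp_request->info;
 *	cqp_info->cqp_cmd = OP_EXAMPLE;
 *	cqp_info->post_sq = 1;
 *	cqp_info->in.u.example.scratch = (uintptr_t)cqp_request;
 *	status = i40iw_handle_cqp_op(iwdev, cqp_request);
 */
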
/**
 * i40iw_add_devusecount - add dev refcount
 * @iwdev: dev for refcount
 */
void i40iw_add_devusecount(struct i40iw_device *iwdev)
{
	atomic64_inc(&iwdev->use_count);
}

/**
 * i40iw_rem_devusecount - decrement refcount for dev
 * @iwdev: device
 */
void i40iw_rem_devusecount(struct i40iw_device *iwdev)
{
	if (!atomic64_dec_and_test(&iwdev->use_count))
		return;
	wake_up(&iwdev->close_wq);
}

/**
 * i40iw_add_pdusecount - add pd refcount
 * @iwpd: pd for refcount
 */
void i40iw_add_pdusecount(struct i40iw_pd *iwpd)
{
	atomic_inc(&iwpd->usecount);
}

/**
 * i40iw_rem_pdusecount - decrement refcount for pd and free if 0
 * @iwpd: pd for refcount
 * @iwdev: iwarp device
 */
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
{
	if (!atomic_dec_and_test(&iwpd->usecount))
		return;
	i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id);
	kfree(iwpd);
}

/**
 * i40iw_add_ref - add refcount for qp
 * @ibqp: iwarp qp
 */
void i40iw_add_ref(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;

	atomic_inc(&iwqp->refcount);
}

/**
 * i40iw_rem_ref - rem refcount for qp and free if 0
 * @ibqp: iwarp qp
 */
void i40iw_rem_ref(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp;
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev;
	u32 qp_num;
	unsigned long flags;

	iwqp = to_iwqp(ibqp);
	iwdev = iwqp->iwdev;
	spin_lock_irqsave(&iwdev->qptable_lock, flags);
	if (!atomic_dec_and_test(&iwqp->refcount)) {
		spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
		return;
	}

	qp_num = iwqp->ibqp.qp_num;
	iwdev->qp_table[qp_num] = NULL;
	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_request->callback_fcn = i40iw_free_qp;
	cqp_request->param = (void *)&iwqp->sc_qp;
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		return;

	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	i40iw_rem_devusecount(iwdev);
}

/**
 * i40iw_get_qp - get qp address
 * @device: iwarp device
 * @qpn: qp number
 */
struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn)
{
	struct i40iw_device *iwdev = to_iwdev(device);

	if ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp))
		return NULL;

	return &iwdev->qp_table[qpn]->ibqp;
}

/**
 * i40iw_debug_buf - print debug msg and buffer if mask set
 * @dev: hardware control device structure
 * @mask: mask to compare if to print debug buffer
 * @desc: identifying string for the buffer
 * @buf: points to buffer addr
 * @size: size of buffer to print
 */
void i40iw_debug_buf(struct i40iw_sc_dev *dev,
		     enum i40iw_debug_flag mask,
		     char *desc,
		     u64 *buf,
		     u32 size)
{
	u32 i;

	if (!(dev->debug_mask & mask))
		return;
	i40iw_debug(dev, mask, "%s\n", desc);
	i40iw_debug(dev, mask, "starting address virt=%p phy=%llxh\n", buf,
		    (unsigned long long)virt_to_phys(buf));

	for (i = 0; i < size; i += 8)
		i40iw_debug(dev, mask, "index %03d val: %016llx\n", i, buf[i / 8]);
}

/**
 * i40iw_get_hw_addr - return hw addr
 * @par: points to shared dev
 */
u8 __iomem *i40iw_get_hw_addr(void *par)
{
	struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par;

	return dev->hw->hw_addr;
}

/**
 * i40iw_remove_head - return head entry and remove from list
 * @list: list for entry
 */
void *i40iw_remove_head(struct list_head *list)
{
	struct list_head *entry;

	if (list_empty(list))
		return NULL;

	entry = (void *)list->next;
	list_del(entry);
	return (void *)entry;
}

/**
 * i40iw_allocate_dma_mem - Memory alloc helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
					      struct i40iw_dma_mem *mem,
					      u64 size,
					      u32 alignment)
{
	struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;

	if (!mem)
		return I40IW_ERR_PARAM;
	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size,
				      (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (!mem->va)
		return I40IW_ERR_NO_MEMORY;
	return 0;
}

/**
 * i40iw_free_dma_mem - Memory free helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 */
void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem)
{
	struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;

	if (!mem || !mem->va)
		return;

	dma_free_coherent(&pcidev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	mem->va = NULL;
}

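/*
 * Usage sketch (illustrative only): allocating an aligned DMA-coherent
 * buffer with the helpers above and releasing it when done. The size and
 * alignment values are arbitrary examples.
 *
 *	struct i40iw_dma_mem mem;
 *
 *	if (i40iw_allocate_dma_mem(hw, &mem, 4096, 4096))
 *		return I40IW_ERR_NO_MEMORY;
 *	... use mem.va (CPU address) and mem.pa (bus/DMA address) ...
 *	i40iw_free_dma_mem(hw, &mem);
 */
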
/**
 * i40iw_allocate_virt_mem - virtual memory alloc helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 */
enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
					       struct i40iw_virt_mem *mem,
					       u32 size)
{
	if (!mem)
		return I40IW_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return I40IW_ERR_NO_MEMORY;
}

/**
 * i40iw_free_virt_mem - virtual memory free helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 */
enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
					   struct i40iw_virt_mem *mem)
{
	if (!mem)
		return I40IW_ERR_PARAM;
	/*
	 * mem->va points to the parent of mem, so both mem and mem->va
	 * can not be touched once mem->va is freed
	 */
	kfree(mem->va);
	return 0;
}

/**
 * i40iw_cqp_sds_cmd - create cqp command for sd
 * @dev: hardware control device structure
 * @sdinfo: information for sd cqp
 */
enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
					 struct i40iw_update_sds_info *sdinfo)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
	       sizeof(cqp_info->in.u.update_pe_sds.info));
	cqp_info->cqp_cmd = OP_UPDATE_PE_SDS;
	cqp_info->post_sq = 1;
	cqp_info->in.u.update_pe_sds.dev = dev;
	cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Update SD's fail");
	return status;
}

/**
 * i40iw_qp_suspend_resume - cqp command for suspend/resume
 * @dev: hardware control device structure
 * @qp: hardware control qp
 * @suspend: flag if suspend or resume
 */
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp_request *cqp_request;
	struct i40iw_sc_cqp *cqp = dev->cqp;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = (suspend) ? OP_SUSPEND : OP_RESUME;
	cqp_info->in.u.suspend_resume.cqp = cqp;
	cqp_info->in.u.suspend_resume.qp = qp;
	cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
}

/**
 * i40iw_term_modify_qp - modify qp for term message
 * @qp: hardware control qp
 * @next_state: qp's next state
 * @term: terminate code
 * @term_len: length
 */
void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	i40iw_next_iw_state(iwqp, next_state, 0, term, term_len);
}

/**
 * i40iw_terminate_done - after terminate is completed
 * @qp: hardware control qp
 * @timeout_occurred: indicates if terminate timer expired
 */
void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
{
	struct i40iw_qp *iwqp;
	u32 next_iwarp_state = I40IW_QP_STATE_ERROR;
	u8 hte = 0;
	bool first_time;
	unsigned long flags;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	spin_lock_irqsave(&iwqp->lock, flags);
	if (iwqp->hte_added) {
		iwqp->hte_added = 0;
		hte = 1;
	}
	first_time = !(qp->term_flags & I40IW_TERM_DONE);
	qp->term_flags |= I40IW_TERM_DONE;
	spin_unlock_irqrestore(&iwqp->lock, flags);
	if (first_time) {
		if (!timeout_occurred)
			i40iw_terminate_del_timer(qp);
		else
			next_iwarp_state = I40IW_QP_STATE_CLOSING;

		i40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0);
		i40iw_cm_disconn(iwqp);
	}
}

/**
 * i40iw_terminate_timeout - timeout happened
 * @context: points to iwarp qp
 */
static void i40iw_terminate_timeout(unsigned long context)
{
	struct i40iw_qp *iwqp = (struct i40iw_qp *)context;
	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;

	i40iw_terminate_done(qp, 1);
	i40iw_rem_ref(&iwqp->ibqp);
}

/**
 * i40iw_terminate_start_timer - start terminate timeout
 * @qp: hardware control qp
 */
void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	i40iw_add_ref(&iwqp->ibqp);
	setup_timer(&iwqp->terminate_timer, i40iw_terminate_timeout,
		    (unsigned long)iwqp);
	iwqp->terminate_timer.expires = jiffies + HZ;
	add_timer(&iwqp->terminate_timer);
}

/**
 * i40iw_terminate_del_timer - delete terminate timeout
 * @qp: hardware control qp
 */
void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	if (del_timer(&iwqp->terminate_timer))
		i40iw_rem_ref(&iwqp->ibqp);
}

/**
 * i40iw_cqp_generic_worker - generic worker for cqp
 * @work: work pointer
 */
static void i40iw_cqp_generic_worker(struct work_struct *work)
{
	struct i40iw_virtchnl_work_info *work_info =
	    &((struct virtchnl_work *)work)->work_info;

	if (work_info->worker_vf_dev)
		work_info->callback_fcn(work_info->worker_vf_dev);
}

/**
 * i40iw_cqp_spawn_worker - spawn worker thread
 * @dev: hardware control device structure
 * @work_info: work request info
 * @iw_vf_idx: virtual function index
 */
void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
			    struct i40iw_virtchnl_work_info *work_info,
			    u32 iw_vf_idx)
{
	struct virtchnl_work *work;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	work = &iwdev->virtchnl_w[iw_vf_idx];
	memcpy(&work->work_info, work_info, sizeof(*work_info));
	INIT_WORK(&work->work, i40iw_cqp_generic_worker);
	queue_work(iwdev->virtchnl_wq, &work->work);
}

/**
 * i40iw_cqp_manage_hmc_fcn_worker - worker to process manage hmc completion
 * @work: work pointer for hmc info
 */
static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work)
{
	struct i40iw_cqp_request *cqp_request =
	    ((struct virtchnl_work *)work)->cqp_request;
	struct i40iw_ccq_cqe_info ccq_cqe_info;
	struct i40iw_hmc_fcn_info *hmcfcninfo =
	    &cqp_request->info.in.u.manage_hmc_pm.info;
	struct i40iw_device *iwdev =
	    (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev;

	ccq_cqe_info.cqp = NULL;
	ccq_cqe_info.maj_err_code = cqp_request->compl_info.maj_err_code;
	ccq_cqe_info.min_err_code = cqp_request->compl_info.min_err_code;
	ccq_cqe_info.op_code = cqp_request->compl_info.op_code;
	ccq_cqe_info.op_ret_val = cqp_request->compl_info.op_ret_val;
	ccq_cqe_info.scratch = 0;
	ccq_cqe_info.error = cqp_request->compl_info.error;
	hmcfcninfo->callback_fcn(cqp_request->info.in.u.manage_hmc_pm.dev,
				 hmcfcninfo->cqp_callback_param, &ccq_cqe_info);
	i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}

/**
 * i40iw_cqp_manage_hmc_fcn_callback - called function after cqp completion
 * @cqp_request: cqp request info struct for hmc fun
 * @unused: unused param of callback
 */
static void i40iw_cqp_manage_hmc_fcn_callback(struct i40iw_cqp_request *cqp_request,
					      u32 unused)
{
	struct virtchnl_work *work;
	struct i40iw_hmc_fcn_info *hmcfcninfo =
	    &cqp_request->info.in.u.manage_hmc_pm.info;
	struct i40iw_device *iwdev =
	    (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->
	    back_dev;

	if (hmcfcninfo && hmcfcninfo->callback_fcn) {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s1\n", __func__);
		atomic_inc(&cqp_request->refcount);
		work = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx];
		work->cqp_request = cqp_request;
		INIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker);
		queue_work(iwdev->virtchnl_wq, &work->work);
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s2\n", __func__);
	} else {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s: Something wrong\n", __func__);
	}
}

/**
 * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command to manage hmc
 * @dev: hardware control device structure
 * @hmcfcninfo: info for hmc
 */
enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
						    struct i40iw_hmc_fcn_info *hmcfcninfo)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s\n", __func__);
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback;
	cqp_request->param = hmcfcninfo;
	memcpy(&cqp_info->in.u.manage_hmc_pm.info, hmcfcninfo,
	       sizeof(*hmcfcninfo));
	cqp_info->in.u.manage_hmc_pm.dev = dev;
	cqp_info->cqp_cmd = OP_MANAGE_HMC_PM_FUNC_TABLE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_hmc_pm.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage HMC fail");
	return status;
}

/**
 * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm
 * @dev: hardware control device structure
 * @values_mem: buffer for fpm
 * @hmc_fn_id: function id for fpm
 */
enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
						      struct i40iw_dma_mem *values_mem,
						      u8 hmc_fn_id)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->param = NULL;
	cqp_info->in.u.query_fpm_values.cqp = dev->cqp;
	cqp_info->in.u.query_fpm_values.fpm_values_pa = values_mem->pa;
	cqp_info->in.u.query_fpm_values.fpm_values_va = values_mem->va;
	cqp_info->in.u.query_fpm_values.hmc_fn_id = hmc_fn_id;
	cqp_info->cqp_cmd = OP_QUERY_FPM_VALUES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.query_fpm_values.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Query FPM fail");
	return status;
}

/**
 * i40iw_cqp_commit_fpm_values_cmd - commit fpm values in hw
 * @dev: hardware control device structure
 * @values_mem: buffer with fpm values
 * @hmc_fn_id: function id for fpm
 */
enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
						       struct i40iw_dma_mem *values_mem,
						       u8 hmc_fn_id)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->param = NULL;
	cqp_info->in.u.commit_fpm_values.cqp = dev->cqp;
	cqp_info->in.u.commit_fpm_values.fpm_values_pa = values_mem->pa;
	cqp_info->in.u.commit_fpm_values.fpm_values_va = values_mem->va;
	cqp_info->in.u.commit_fpm_values.hmc_fn_id = hmc_fn_id;
	cqp_info->cqp_cmd = OP_COMMIT_FPM_VALUES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.commit_fpm_values.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Commit FPM fail");
	return status;
}

/**
 * i40iw_vf_wait_vchnl_resp - wait for channel msg
 * @dev: function's device struct
 */
enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev = dev->back_dev;
	int timeout_ret;

	i40iw_debug(dev, I40IW_DEBUG_VIRT, "%s[%u] dev %p, iwdev %p\n",
		    __func__, __LINE__, dev, iwdev);

	atomic_set(&iwdev->vchnl_msgs, 2);
	timeout_ret = wait_event_timeout(iwdev->vchnl_waitq,
					 (atomic_read(&iwdev->vchnl_msgs) == 1),
					 I40IW_VCHNL_EVENT_TIMEOUT);
	atomic_dec(&iwdev->vchnl_msgs);
	if (!timeout_ret) {
		i40iw_pr_err("virt channel completion timeout = 0x%x\n", timeout_ret);
		atomic_set(&iwdev->vchnl_msgs, 0);
		dev->vchnl_up = false;
		return I40IW_ERR_TIMEOUT;
	}
	wake_up(&dev->vf_reqs);
	return 0;
}

/**
 * i40iw_cqp_cq_create_cmd - create a cq for the cqp
 * @dev: device pointer
 * @cq: pointer to created cq
 */
enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev,
					       struct i40iw_sc_cq *cq)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Create CQ fail");

	return status;
}

/**
 * i40iw_cqp_qp_create_cmd - create a qp for the cqp
 * @dev: device pointer
 * @qp: pointer to created qp
 */
enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
					       struct i40iw_sc_qp *qp)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_create_qp_info *qp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;

	memset(qp_info, 0, sizeof(*qp_info));

	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;

	cqp_info->cqp_cmd = OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP QP create fail");
	return status;
}

/**
 * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
 * @dev: device pointer
 * @cq: pointer to cq
 */
void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_cq_wq_destroy(iwdev, cq);
}

/**
 * i40iw_cqp_qp_destroy_cmd - destroy the cqp
 * @dev: device pointer
 * @qp: pointer to qp
 */
void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	memset(cqp_info, 0, sizeof(*cqp_info));

	cqp_info->cqp_cmd = OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = qp;
	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP QP_DESTROY fail");
}

/**
 * i40iw_ieq_mpa_crc_ae - generate AE for crc error
 * @dev: hardware control device structure
 * @qp: hardware control qp
 */
void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_qp_flush_info info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_debug(dev, I40IW_DEBUG_AEQ, "%s entered\n", __func__);
	memset(&info, 0, sizeof(info));
	info.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR;
	info.generate_ae = true;
	info.ae_source = 0x3;
	(void)i40iw_hw_flush_wqes(iwdev, qp, &info, false);
}

/**
 * i40iw_init_hash_desc - initialize hash for crc calculation
 * @desc: address of pointer to hash descriptor to be initialized
 */
enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc)
{
	struct crypto_shash *tfm;
	struct shash_desc *tdesc;

	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return I40IW_ERR_MPA_CRC;

	tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!tdesc) {
		crypto_free_shash(tfm);
		return I40IW_ERR_MPA_CRC;
	}
	tdesc->tfm = tfm;
	*desc = tdesc;

	return 0;
}

/**
 * i40iw_free_hash_desc - free hash desc
 * @desc: to be freed
 */
void i40iw_free_hash_desc(struct shash_desc *desc)
{
	if (desc) {
		crypto_free_shash(desc->tfm);
		kfree(desc);
	}
}

/**
 * i40iw_alloc_query_fpm_buf - allocate buffer for fpm
 * @dev: hardware control device structure
 * @mem: buffer ptr for fpm to be allocated
 * @return: memory allocation status
 */
enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
						 struct i40iw_dma_mem *mem)
{
	enum i40iw_status_code status;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	status = i40iw_obj_aligned_mem(iwdev, mem, I40IW_QUERY_FPM_BUF_SIZE,
				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
	return status;
}

/**
 * i40iw_ieq_check_mpacrc - check if mpa crc is OK
 * @desc: desc for hash
 * @addr: address of buffer for crc
 * @length: length of buffer
 * @value: value to be compared
 */
enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
					      void *addr,
					      u32 length,
					      u32 value)
{
	u32 crc = 0;
	int ret;
	enum i40iw_status_code ret_code = 0;

	crypto_shash_init(desc);
	ret = crypto_shash_update(desc, addr, length);
	if (!ret)
		crypto_shash_final(desc, (u8 *)&crc);
	if (crc != value) {
		i40iw_pr_err("mpa crc check fail\n");
		ret_code = I40IW_ERR_MPA_CRC;
	}
	return ret_code;
}

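/*
 * Usage sketch (illustrative only): set up a crc32c shash descriptor once
 * with i40iw_init_hash_desc(), then validate the CRC that MPA appends to a
 * received FPDU. The "pdu"/"pdu_len" names and the trailer read are
 * hypothetical; they assume the 4-byte CRC sits at the end of the buffer.
 *
 *	struct shash_desc *desc;
 *	u32 crc = *(u32 *)(pdu + pdu_len - 4);	// hypothetical trailer read
 *
 *	if (i40iw_init_hash_desc(&desc))
 *		return I40IW_ERR_MPA_CRC;
 *	if (i40iw_ieq_check_mpacrc(desc, pdu, pdu_len - 4, crc))
 *		... bad CRC, e.g. raise AE via i40iw_ieq_mpa_crc_ae() ...
 *	i40iw_free_hash_desc(desc);
 */
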
/**
 * i40iw_ieq_get_qp - get qp based on quad in puda buffer
 * @dev: hardware control device structure
 * @buf: receive puda buffer on exception q
 */
struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
				     struct i40iw_puda_buf *buf)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_qp *iwqp;
	struct i40iw_cm_node *cm_node;
	u32 loc_addr[4], rem_addr[4];
	u16 loc_port, rem_port;
	struct ipv6hdr *ip6h;
	struct iphdr *iph = (struct iphdr *)buf->iph;
	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;

	if (iph->version == 4) {
		memset(loc_addr, 0, sizeof(loc_addr));
		loc_addr[0] = ntohl(iph->daddr);
		memset(rem_addr, 0, sizeof(rem_addr));
		rem_addr[0] = ntohl(iph->saddr);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		i40iw_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
		i40iw_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
	}
	loc_port = ntohs(tcph->dest);
	rem_port = ntohs(tcph->source);

	cm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
				  loc_addr, false);
	if (!cm_node)
		return NULL;
	iwqp = cm_node->iwqp;
	return &iwqp->sc_qp;
}

/**
 * i40iw_ieq_update_tcpip_info - update tcpip in the buffer
 * @buf: puda to update
 * @length: length of buffer
 * @seqnum: seq number for tcp
 */
void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum)
{
	struct tcphdr *tcph;
	struct iphdr *iph;
	u16 iphlen;
	u16 packetsize;
	u8 *addr = (u8 *)buf->mem.va;

	iphlen = (buf->ipv4) ? 20 : 40;
	iph = (struct iphdr *)(addr + buf->maclen);
	tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
	packetsize = length + buf->tcphlen + iphlen;

	iph->tot_len = htons(packetsize);
	tcph->seq = htonl(seqnum);
}

/**
 * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer
 * @info: to get information
 * @buf: puda buffer
 */
enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
						 struct i40iw_puda_buf *buf)
{
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcphdr *tcph;
	u16 iphlen;
	u16 pkt_len;
	u8 *mem = (u8 *)buf->mem.va;
	struct ethhdr *ethh = (struct ethhdr *)buf->mem.va;

	if (ethh->h_proto == htons(0x8100)) {
		info->vlan_valid = true;
		buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK;
	}
	buf->maclen = (info->vlan_valid) ? 18 : 14;
	iphlen = (info->l3proto) ? 40 : 20;
	buf->ipv4 = (info->l3proto) ? false : true;
	buf->iph = mem + buf->maclen;
	iph = (struct iphdr *)buf->iph;

	buf->tcph = buf->iph + iphlen;
	tcph = (struct tcphdr *)buf->tcph;

	if (buf->ipv4) {
		pkt_len = ntohs(iph->tot_len);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		pkt_len = ntohs(ip6h->payload_len) + iphlen;
	}

	buf->totallen = pkt_len + buf->maclen;

	if (info->payload_len < buf->totallen) {
		i40iw_pr_err("payload_len = 0x%x totallen expected 0x%x\n",
			     info->payload_len, buf->totallen);
		return I40IW_ERR_INVALID_SIZE;
	}

	buf->tcphlen = (tcph->doff) << 2;
	buf->datalen = pkt_len - iphlen - buf->tcphlen;
	buf->data = (buf->datalen) ? buf->tcph + buf->tcphlen : NULL;
	buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
	buf->seqnum = ntohl(tcph->seq);
	return 0;
}

/**
 * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
 * @vsi: pointer to the vsi structure
 */
static void i40iw_hw_stats_timeout(unsigned long vsi)
{
	struct i40iw_sc_vsi *sc_vsi = (struct i40iw_sc_vsi *)vsi;
	struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
	struct i40iw_vsi_pestat *pf_devstat = sc_vsi->pestat;
	struct i40iw_vsi_pestat *vf_devstat = NULL;
	u16 iw_vf_idx;
	unsigned long flags;

	/* PF */
	i40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats);

	for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
		spin_lock_irqsave(&pf_devstat->lock, flags);
		if (pf_dev->vf_dev[iw_vf_idx]) {
			if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
				vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat;
				i40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats);
			}
		}
		spin_unlock_irqrestore(&pf_devstat->lock, flags);
	}

	mod_timer(&pf_devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * i40iw_hw_stats_start_timer - Start periodic stats timer
 * @vsi: pointer to the vsi structure
 */
void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
{
	struct i40iw_vsi_pestat *devstat = vsi->pestat;

	setup_timer(&devstat->stats_timer, i40iw_hw_stats_timeout,
		    (unsigned long)vsi);
	mod_timer(&devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * i40iw_hw_stats_stop_timer - Delete periodic stats timer
 * @vsi: pointer to the vsi structure
 */
void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi)
{
	struct i40iw_vsi_pestat *devstat = vsi->pestat;

	del_timer_sync(&devstat->stats_timer);
}
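
/*
 * Usage sketch (illustrative only): the periodic stats lifecycle built from
 * the three functions above. i40iw_hw_stats_start_timer() arms the timer,
 * i40iw_hw_stats_timeout() re-arms itself every STATS_TIMER_DELAY ms to
 * refresh PF and VF counters, and the stop call is expected to run on
 * teardown, before the vsi goes away.
 *
 *	i40iw_hw_stats_start_timer(vsi);	// e.g. when resources open
 *	...
 *	i40iw_hw_stats_stop_timer(vsi);		// e.g. when resources close
 */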