/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include "i40iw.h"

/**
 * i40iw_arp_table - manage arp table
 * @iwdev: iwarp device
 * @ip_addr: ip address for device
 * @ipv4: flag indicating IPv4 when true, IPv6 otherwise
 * @mac_addr: mac address ptr
 * @action: add, resolve or delete
 */
int i40iw_arp_table(struct i40iw_device *iwdev,
		    u32 *ip_addr,
		    bool ipv4,
		    u8 *mac_addr,
		    u32 action)
{
	int arp_index;
	int err;
	u32 ip[4];

	if (ipv4) {
		memset(ip, 0, sizeof(ip));
		ip[0] = *ip_addr;
	} else {
		memcpy(ip, ip_addr, sizeof(ip));
	}

	for (arp_index = 0; (u32)arp_index < iwdev->arp_table_size; arp_index++)
		if (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0)
			break;
	switch (action) {
	case I40IW_ARP_ADD:
		if (arp_index != iwdev->arp_table_size)
			return -1;

		arp_index = 0;
		err = i40iw_alloc_resource(iwdev, iwdev->allocated_arps,
					   iwdev->arp_table_size,
					   (u32 *)&arp_index,
					   &iwdev->next_arp_index);

		if (err)
			return err;

		memcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip));
		ether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr);
		break;
	case I40IW_ARP_RESOLVE:
		if (arp_index == iwdev->arp_table_size)
			return -1;
		break;
	case I40IW_ARP_DELETE:
		if (arp_index == iwdev->arp_table_size)
			return -1;
		memset(iwdev->arp_table[arp_index].ip_addr, 0,
		       sizeof(iwdev->arp_table[arp_index].ip_addr));
		eth_zero_addr(iwdev->arp_table[arp_index].mac_addr);
		i40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index);
		break;
	default:
		return -1;
	}
	return arp_index;
}
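
/*
 * Illustrative sketch of a caller (hypothetical; the notifier handlers
 * below go through i40iw_manage_arp_cache() instead).  Addresses are
 * passed in host byte order, so an IPv4 ifaddr is converted with ntohl()
 * first:
 *
 *	u32 ip = ntohl(ifa->ifa_address);
 *	int idx;
 *
 *	idx = i40iw_arp_table(iwdev, &ip, true, mac, I40IW_ARP_ADD);
 *	...
 *	idx = i40iw_arp_table(iwdev, &ip, true, mac, I40IW_ARP_RESOLVE);
 *
 * A non-negative return is the arp table index; -1 indicates failure.
 */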

/**
 * i40iw_wr32 - write 32 bits to hw register
 * @hw: hardware information including registers
 * @reg: register offset
 * @value: value to write to register
 */
inline void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value)
{
	writel(value, hw->hw_addr + reg);
}

/**
 * i40iw_rd32 - read a 32 bit hw register
 * @hw: hardware information including registers
 * @reg: register offset
 *
 * Return value of register content
 */
inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
{
	return readl(hw->hw_addr + reg);
}
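
/*
 * Both helpers take @reg as a byte offset into the mapped BAR.  A sketch
 * of a doorbell write (the register name is assumed from the hardware
 * headers, not defined in this file):
 *
 *	i40iw_wr32(dev->hw, I40E_PFPE_CQPDB, head);
 */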

/**
 * i40iw_inetaddr_event - system notifier for ipv4 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: interface address
 */
int i40iw_inetaddr_event(struct notifier_block *notifier,
			 unsigned long event,
			 void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *event_netdev = ifa->ifa_dev->dev;
	struct net_device *netdev;
	struct net_device *upper_dev;
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;
	u32 local_ipaddr;
	u32 action = I40IW_ARP_ADD;

	hdl = i40iw_find_netdev(event_netdev);
	if (!hdl)
		return NOTIFY_DONE;

	iwdev = &hdl->device;
	if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
		return NOTIFY_DONE;

	netdev = iwdev->ldev->netdev;
	upper_dev = netdev_master_upper_dev_get(netdev);
	if (netdev != event_netdev)
		return NOTIFY_DONE;

	if (upper_dev)
		local_ipaddr = ntohl(
			((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
	else
		local_ipaddr = ntohl(ifa->ifa_address);
	switch (event) {
	case NETDEV_DOWN:
		action = I40IW_ARP_DELETE;
		/* Fall through */
	case NETDEV_UP:
		/* Fall through */
	case NETDEV_CHANGEADDR:
		i40iw_manage_arp_cache(iwdev,
				       netdev->dev_addr,
				       &local_ipaddr,
				       true,
				       action);
		i40iw_if_notify(iwdev, netdev, &local_ipaddr, true,
				action == I40IW_ARP_ADD);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_inet6addr_event - system notifier for ipv6 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: interface address
 */
int i40iw_inet6addr_event(struct notifier_block *notifier,
			  unsigned long event,
			  void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *event_netdev = ifa->idev->dev;
	struct net_device *netdev;
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;
	u32 local_ipaddr6[4];
	u32 action = I40IW_ARP_ADD;

	hdl = i40iw_find_netdev(event_netdev);
	if (!hdl)
		return NOTIFY_DONE;

	iwdev = &hdl->device;
	if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
		return NOTIFY_DONE;

	netdev = iwdev->ldev->netdev;
	if (netdev != event_netdev)
		return NOTIFY_DONE;

	i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
	switch (event) {
	case NETDEV_DOWN:
		action = I40IW_ARP_DELETE;
		/* Fall through */
	case NETDEV_UP:
		/* Fall through */
	case NETDEV_CHANGEADDR:
		i40iw_manage_arp_cache(iwdev,
				       netdev->dev_addr,
				       local_ipaddr6,
				       false,
				       action);
		i40iw_if_notify(iwdev, netdev, local_ipaddr6, false,
				action == I40IW_ARP_ADD);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_net_event - system notifier for net events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: neighbor
 */
int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr)
{
	struct neighbour *neigh = ptr;
	struct i40iw_device *iwdev;
	struct i40iw_handler *iwhdl;
	__be32 *p;
	u32 local_ipaddr[4];

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		iwhdl = i40iw_find_netdev((struct net_device *)neigh->dev);
		if (!iwhdl)
			return NOTIFY_DONE;
		iwdev = &iwhdl->device;
		if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
			return NOTIFY_DONE;
		p = (__be32 *)neigh->primary_key;
		i40iw_copy_ip_ntohl(local_ipaddr, p);
		if (neigh->nud_state & NUD_VALID) {
			i40iw_manage_arp_cache(iwdev,
					       neigh->ha,
					       local_ipaddr,
					       false,
					       I40IW_ARP_ADD);

		} else {
			i40iw_manage_arp_cache(iwdev,
					       neigh->ha,
					       local_ipaddr,
					       false,
					       I40IW_ARP_DELETE);
		}
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_get_cqp_request - get cqp struct
 * @cqp: device cqp ptr
 * @wait: cqp to be used in wait mode
 */
struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)
{
	struct i40iw_cqp_request *cqp_request = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cqp->req_lock, flags);
	if (!list_empty(&cqp->cqp_avail_reqs)) {
		cqp_request = list_entry(cqp->cqp_avail_reqs.next,
					 struct i40iw_cqp_request, list);
		list_del_init(&cqp_request->list);
	}
	spin_unlock_irqrestore(&cqp->req_lock, flags);
	if (!cqp_request) {
		cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
		if (cqp_request) {
			cqp_request->dynamic = true;
			INIT_LIST_HEAD(&cqp_request->list);
			init_waitqueue_head(&cqp_request->waitq);
		}
	}
	if (!cqp_request) {
		i40iw_pr_err("CQP Request Fail: No Memory");
		return NULL;
	}

	if (wait) {
		atomic_set(&cqp_request->refcount, 2);
		cqp_request->waiting = true;
	} else {
		atomic_set(&cqp_request->refcount, 1);
	}
	return cqp_request;
}
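
/*
 * Note on the initial refcount above: a waiting request starts at 2
 * because both the caller (via i40iw_wait_event() below) and the CQP
 * completion path drop a reference with i40iw_put_cqp_request(); a
 * fire-and-forget request starts at 1 and is released by the completion
 * path alone.
 */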

/**
 * i40iw_free_cqp_request - free cqp request
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
	unsigned long flags;

	if (cqp_request->dynamic) {
		kfree(cqp_request);
	} else {
		cqp_request->request_done = false;
		cqp_request->callback_fcn = NULL;
		cqp_request->waiting = false;

		spin_lock_irqsave(&cqp->req_lock, flags);
		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
		spin_unlock_irqrestore(&cqp->req_lock, flags);
	}
	wake_up(&iwdev->close_wq);
}

/**
 * i40iw_put_cqp_request - dec ref count and free if 0
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
			   struct i40iw_cqp_request *cqp_request)
{
	if (atomic_dec_and_test(&cqp_request->refcount))
		i40iw_free_cqp_request(cqp, cqp_request);
}

/**
 * i40iw_free_pending_cqp_request - free pending cqp request objs
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
					   struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);

	if (cqp_request->waiting) {
		cqp_request->compl_info.error = true;
		cqp_request->request_done = true;
		wake_up(&cqp_request->waitq);
	}
	i40iw_put_cqp_request(cqp, cqp_request);
	wait_event_timeout(iwdev->close_wq,
			   !atomic_read(&cqp_request->refcount),
			   1000);
}

/**
 * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
 * @iwdev: iwarp device
 */
void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request = NULL;
	struct cqp_commands_info *pcmdinfo = NULL;
	u32 i, pending_work, wqe_idx;

	pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
	wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
	for (i = 0; i < pending_work; i++) {
		cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
		if (cqp_request)
			i40iw_free_pending_cqp_request(cqp, cqp_request);
		wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
	}

	while (!list_empty(&dev->cqp_cmd_head)) {
		pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
		cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
		if (cqp_request)
			i40iw_free_pending_cqp_request(cqp, cqp_request);
	}
}

/**
 * i40iw_free_qp - callback after destroy cqp completes
 * @cqp_request: cqp request for destroy qp
 * @num: not used
 */
static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
{
	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
	struct i40iw_device *iwdev;
	u32 qp_num = iwqp->ibqp.qp_num;

	iwdev = iwqp->iwdev;

	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	i40iw_rem_devusecount(iwdev);
}

/**
 * i40iw_wait_event - wait for completion
 * @iwdev: iwarp device
 * @cqp_request: cqp request to wait
 */
static int i40iw_wait_event(struct i40iw_device *iwdev,
			    struct i40iw_cqp_request *cqp_request)
{
	struct cqp_commands_info *info = &cqp_request->info;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_timeout cqp_timeout;
	bool cqp_error = false;
	int err_code = 0;

	memset(&cqp_timeout, 0, sizeof(cqp_timeout));
	cqp_timeout.compl_cqp_cmds = iwdev->sc_dev.cqp_cmd_stats[OP_COMPLETED_COMMANDS];
	do {
		if (wait_event_timeout(cqp_request->waitq,
				       cqp_request->request_done, CQP_COMPL_WAIT_TIME))
			break;

		i40iw_check_cqp_progress(&cqp_timeout, &iwdev->sc_dev);

		if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
			continue;

		i40iw_pr_err("error cqp command 0x%x timed out", info->cqp_cmd);
		err_code = -ETIME;
		if (!iwdev->reset) {
			iwdev->reset = true;
			i40iw_request_reset(iwdev);
		}
		goto done;
	} while (1);
	cqp_error = cqp_request->compl_info.error;
	if (cqp_error) {
		i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
			     info->cqp_cmd, cqp_request->compl_info.maj_err_code,
			     cqp_request->compl_info.min_err_code);
		err_code = -EPROTO;
		goto done;
	}
done:
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return err_code;
}

/**
 * i40iw_handle_cqp_op - process cqp command
 * @iwdev: iwarp device
 * @cqp_request: cqp request to process
 */
enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
					   struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;
	struct cqp_commands_info *info = &cqp_request->info;
	int err_code = 0;

	if (iwdev->reset) {
		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
		return I40IW_ERR_CQP_COMPL_ERROR;
	}

	status = i40iw_process_cqp_cmd(dev, info);
	if (status) {
		i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
		return status;
	}
	if (cqp_request->waiting)
		err_code = i40iw_wait_event(iwdev, cqp_request);
	if (err_code)
		status = I40IW_ERR_CQP_COMPL_ERROR;
	return status;
}
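
/*
 * Typical CQP command flow, sketched from the callers later in this file
 * (see i40iw_cqp_sds_cmd below for a concrete instance):
 *
 *	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
 *	if (!cqp_request)
 *		return I40IW_ERR_NO_MEMORY;
 *	cqp_info = &cqp_request->info;
 *	cqp_info->cqp_cmd = OP_...;
 *	cqp_info->post_sq = 1;
 *	cqp_info->in.u....scratch = (uintptr_t)cqp_request;
 *	status = i40iw_handle_cqp_op(iwdev, cqp_request);
 *
 * i40iw_handle_cqp_op() frees the request itself on its error paths, so
 * callers only check the returned status.
 */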

/**
 * i40iw_add_devusecount - add dev refcount
 * @iwdev: dev for refcount
 */
void i40iw_add_devusecount(struct i40iw_device *iwdev)
{
	atomic64_inc(&iwdev->use_count);
}

/**
 * i40iw_rem_devusecount - decrement refcount for dev
 * @iwdev: device
 */
void i40iw_rem_devusecount(struct i40iw_device *iwdev)
{
	if (!atomic64_dec_and_test(&iwdev->use_count))
		return;
	wake_up(&iwdev->close_wq);
}

/**
 * i40iw_add_pdusecount - add pd refcount
 * @iwpd: pd for refcount
 */
void i40iw_add_pdusecount(struct i40iw_pd *iwpd)
{
	atomic_inc(&iwpd->usecount);
}

/**
 * i40iw_rem_pdusecount - decrement refcount for pd and free if 0
 * @iwpd: pd for refcount
 * @iwdev: iwarp device
 */
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
{
	if (!atomic_dec_and_test(&iwpd->usecount))
		return;
	i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id);
	kfree(iwpd);
}

/**
 * i40iw_add_ref - add refcount for qp
 * @ibqp: iwarp qp
 */
void i40iw_add_ref(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;

	atomic_inc(&iwqp->refcount);
}

/**
 * i40iw_rem_ref - rem refcount for qp and free if 0
 * @ibqp: iwarp qp
 */
void i40iw_rem_ref(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp;
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev;
	u32 qp_num;
	unsigned long flags;

	iwqp = to_iwqp(ibqp);
	iwdev = iwqp->iwdev;
	spin_lock_irqsave(&iwdev->qptable_lock, flags);
	if (!atomic_dec_and_test(&iwqp->refcount)) {
		spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
		return;
	}

	qp_num = iwqp->ibqp.qp_num;
	iwdev->qp_table[qp_num] = NULL;
	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_request->callback_fcn = i40iw_free_qp;
	cqp_request->param = (void *)&iwqp->sc_qp;
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		return;

	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	i40iw_rem_devusecount(iwdev);
}

/**
 * i40iw_get_qp - get qp address
 * @device: iwarp device
 * @qpn: qp number
 */
struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn)
{
	struct i40iw_device *iwdev = to_iwdev(device);

	if ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp))
		return NULL;

	return &iwdev->qp_table[qpn]->ibqp;
}

/**
 * i40iw_debug_buf - print debug msg and buffer if mask set
 * @dev: hardware control device structure
 * @mask: mask to compare if to print debug buffer
 * @desc: description of buffer
 * @buf: points to buffer addr
 * @size: size of buffer to print
 */
void i40iw_debug_buf(struct i40iw_sc_dev *dev,
		     enum i40iw_debug_flag mask,
		     char *desc,
		     u64 *buf,
		     u32 size)
{
	u32 i;

	if (!(dev->debug_mask & mask))
		return;
	i40iw_debug(dev, mask, "%s\n", desc);
	i40iw_debug(dev, mask, "starting address virt=%p phy=%llxh\n", buf,
		    (unsigned long long)virt_to_phys(buf));

	for (i = 0; i < size; i += 8)
		i40iw_debug(dev, mask, "index %03d val: %016llx\n", i, buf[i / 8]);
}

/**
 * i40iw_get_hw_addr - return hw addr
 * @par: points to shared dev
 */
u8 __iomem *i40iw_get_hw_addr(void *par)
{
	struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par;

	return dev->hw->hw_addr;
}

/**
 * i40iw_remove_head - return head entry and remove from list
 * @list: list for entry
 */
void *i40iw_remove_head(struct list_head *list)
{
	struct list_head *entry;

	if (list_empty(list))
		return NULL;

	entry = (void *)list->next;
	list_del(entry);
	return (void *)entry;
}

/**
 * i40iw_allocate_dma_mem - Memory alloc helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
					      struct i40iw_dma_mem *mem,
					      u64 size,
					      u32 alignment)
{
	struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;

	if (!mem)
		return I40IW_ERR_PARAM;
	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size,
				      (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (!mem->va)
		return I40IW_ERR_NO_MEMORY;
	return 0;
}

/**
 * i40iw_free_dma_mem - Memory free helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 */
void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem)
{
	struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;

	if (!mem || !mem->va)
		return;

	dma_free_coherent(&pcidev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	mem->va = NULL;
}
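
/*
 * Illustrative pairing of the two helpers above (hypothetical caller;
 * the size and alignment values are made up for the sketch):
 *
 *	struct i40iw_dma_mem mem;
 *
 *	if (i40iw_allocate_dma_mem(dev->hw, &mem, 4096, 256))
 *		return I40IW_ERR_NO_MEMORY;
 *	...hand mem.pa to hardware, access mem.va from the CPU...
 *	i40iw_free_dma_mem(dev->hw, &mem);
 */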

/**
 * i40iw_allocate_virt_mem - virtual memory alloc helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 */
enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
					       struct i40iw_virt_mem *mem,
					       u32 size)
{
	if (!mem)
		return I40IW_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return I40IW_ERR_NO_MEMORY;
	return 0;
}

/**
 * i40iw_free_virt_mem - virtual memory free helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 */
enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
					   struct i40iw_virt_mem *mem)
{
	if (!mem)
		return I40IW_ERR_PARAM;
	/*
	 * mem->va points to the parent of mem, so both mem and mem->va
	 * can not be touched once mem->va is freed
	 */
	kfree(mem->va);
	return 0;
}

/**
 * i40iw_cqp_sds_cmd - create cqp command for sd
 * @dev: hardware control device structure
 * @sdinfo: information for sd cqp
 */
enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
					 struct i40iw_update_sds_info *sdinfo)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
	       sizeof(cqp_info->in.u.update_pe_sds.info));
	cqp_info->cqp_cmd = OP_UPDATE_PE_SDS;
	cqp_info->post_sq = 1;
	cqp_info->in.u.update_pe_sds.dev = dev;
	cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Update SD's fail");
	return status;
}

/**
 * i40iw_qp_suspend_resume - cqp command for suspend/resume
 * @dev: hardware control device structure
 * @qp: hardware control qp
 * @suspend: flag if suspend or resume
 */
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp_request *cqp_request;
	struct i40iw_sc_cqp *cqp = dev->cqp;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = (suspend) ? OP_SUSPEND : OP_RESUME;
	cqp_info->in.u.suspend_resume.cqp = cqp;
	cqp_info->in.u.suspend_resume.qp = qp;
	cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
}

/**
 * i40iw_term_modify_qp - modify qp for term message
 * @qp: hardware control qp
 * @next_state: qp's next state
 * @term: terminate code
 * @term_len: length
 */
void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	i40iw_next_iw_state(iwqp, next_state, 0, term, term_len);
}

/**
 * i40iw_terminate_done - after terminate is completed
 * @qp: hardware control qp
 * @timeout_occurred: indicates if terminate timer expired
 */
void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
{
	struct i40iw_qp *iwqp;
	u32 next_iwarp_state = I40IW_QP_STATE_ERROR;
	u8 hte = 0;
	bool first_time;
	unsigned long flags;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	spin_lock_irqsave(&iwqp->lock, flags);
	if (iwqp->hte_added) {
		iwqp->hte_added = 0;
		hte = 1;
	}
	first_time = !(qp->term_flags & I40IW_TERM_DONE);
	qp->term_flags |= I40IW_TERM_DONE;
	spin_unlock_irqrestore(&iwqp->lock, flags);
	if (first_time) {
		if (!timeout_occurred)
			i40iw_terminate_del_timer(qp);
		else
			next_iwarp_state = I40IW_QP_STATE_CLOSING;

		i40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0);
		i40iw_cm_disconn(iwqp);
	}
}

/**
 * i40iw_terminate_timeout - timeout happened
 * @context: points to iwarp qp
 */
static void i40iw_terminate_timeout(unsigned long context)
{
	struct i40iw_qp *iwqp = (struct i40iw_qp *)context;
	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;

	i40iw_terminate_done(qp, 1);
	i40iw_rem_ref(&iwqp->ibqp);
}

/**
 * i40iw_terminate_start_timer - start terminate timeout
 * @qp: hardware control qp
 */
void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	i40iw_add_ref(&iwqp->ibqp);
	setup_timer(&iwqp->terminate_timer, i40iw_terminate_timeout,
		    (unsigned long)iwqp);
	iwqp->terminate_timer.expires = jiffies + HZ;
	add_timer(&iwqp->terminate_timer);
}

/**
 * i40iw_terminate_del_timer - delete terminate timeout
 * @qp: hardware control qp
 */
void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	if (del_timer(&iwqp->terminate_timer))
		i40iw_rem_ref(&iwqp->ibqp);
}

/**
 * i40iw_cqp_generic_worker - generic worker for cqp
 * @work: work pointer
 */
static void i40iw_cqp_generic_worker(struct work_struct *work)
{
	struct i40iw_virtchnl_work_info *work_info =
	    &((struct virtchnl_work *)work)->work_info;

	if (work_info->worker_vf_dev)
		work_info->callback_fcn(work_info->worker_vf_dev);
}

/**
 * i40iw_cqp_spawn_worker - spawn worker thread
 * @dev: device struct pointer
 * @work_info: work request info
 * @iw_vf_idx: virtual function index
 */
void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
			    struct i40iw_virtchnl_work_info *work_info,
			    u32 iw_vf_idx)
{
	struct virtchnl_work *work;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	work = &iwdev->virtchnl_w[iw_vf_idx];
	memcpy(&work->work_info, work_info, sizeof(*work_info));
	INIT_WORK(&work->work, i40iw_cqp_generic_worker);
	queue_work(iwdev->virtchnl_wq, &work->work);
}

/**
 * i40iw_cqp_manage_hmc_fcn_worker - worker to run hmc cqp completion callback
 * @work: work pointer for hmc info
 */
static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work)
{
	struct i40iw_cqp_request *cqp_request =
	    ((struct virtchnl_work *)work)->cqp_request;
	struct i40iw_ccq_cqe_info ccq_cqe_info;
	struct i40iw_hmc_fcn_info *hmcfcninfo =
	    &cqp_request->info.in.u.manage_hmc_pm.info;
	struct i40iw_device *iwdev =
	    (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev;

	ccq_cqe_info.cqp = NULL;
	ccq_cqe_info.maj_err_code = cqp_request->compl_info.maj_err_code;
	ccq_cqe_info.min_err_code = cqp_request->compl_info.min_err_code;
	ccq_cqe_info.op_code = cqp_request->compl_info.op_code;
	ccq_cqe_info.op_ret_val = cqp_request->compl_info.op_ret_val;
	ccq_cqe_info.scratch = 0;
	ccq_cqe_info.error = cqp_request->compl_info.error;
	hmcfcninfo->callback_fcn(cqp_request->info.in.u.manage_hmc_pm.dev,
				 hmcfcninfo->cqp_callback_param, &ccq_cqe_info);
	i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}

/**
 * i40iw_cqp_manage_hmc_fcn_callback - called function after cqp completion
 * @cqp_request: cqp request info struct for hmc fun
 * @unused: unused param of callback
 */
static void i40iw_cqp_manage_hmc_fcn_callback(struct i40iw_cqp_request *cqp_request,
					      u32 unused)
{
	struct virtchnl_work *work;
	struct i40iw_hmc_fcn_info *hmcfcninfo =
	    &cqp_request->info.in.u.manage_hmc_pm.info;
	struct i40iw_device *iwdev =
	    (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev;

	if (hmcfcninfo && hmcfcninfo->callback_fcn) {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s1\n", __func__);
		atomic_inc(&cqp_request->refcount);
		work = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx];
		work->cqp_request = cqp_request;
		INIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker);
		queue_work(iwdev->virtchnl_wq, &work->work);
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s2\n", __func__);
	} else {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s: Something wrong\n", __func__);
	}
}

/**
 * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command to manage hmc
 * @dev: hardware control device structure
 * @hmcfcninfo: info for hmc
 */
enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
						    struct i40iw_hmc_fcn_info *hmcfcninfo)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s\n", __func__);
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback;
	cqp_request->param = hmcfcninfo;
	memcpy(&cqp_info->in.u.manage_hmc_pm.info, hmcfcninfo,
	       sizeof(*hmcfcninfo));
	cqp_info->in.u.manage_hmc_pm.dev = dev;
	cqp_info->cqp_cmd = OP_MANAGE_HMC_PM_FUNC_TABLE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_hmc_pm.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage HMC fail");
	return status;
}

/**
 * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm
 * @dev: hardware control device structure
 * @values_mem: buffer for fpm
 * @hmc_fn_id: function id for fpm
 */
enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
						      struct i40iw_dma_mem *values_mem,
						      u8 hmc_fn_id)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->param = NULL;
	cqp_info->in.u.query_fpm_values.cqp = dev->cqp;
	cqp_info->in.u.query_fpm_values.fpm_values_pa = values_mem->pa;
	cqp_info->in.u.query_fpm_values.fpm_values_va = values_mem->va;
	cqp_info->in.u.query_fpm_values.hmc_fn_id = hmc_fn_id;
	cqp_info->cqp_cmd = OP_QUERY_FPM_VALUES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.query_fpm_values.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Query FPM fail");
	return status;
}

/**
 * i40iw_cqp_commit_fpm_values_cmd - commit fpm values in hw
 * @dev: hardware control device structure
 * @values_mem: buffer with fpm values
 * @hmc_fn_id: function id for fpm
 */
enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
						       struct i40iw_dma_mem *values_mem,
						       u8 hmc_fn_id)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->param = NULL;
	cqp_info->in.u.commit_fpm_values.cqp = dev->cqp;
	cqp_info->in.u.commit_fpm_values.fpm_values_pa = values_mem->pa;
	cqp_info->in.u.commit_fpm_values.fpm_values_va = values_mem->va;
	cqp_info->in.u.commit_fpm_values.hmc_fn_id = hmc_fn_id;
	cqp_info->cqp_cmd = OP_COMMIT_FPM_VALUES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.commit_fpm_values.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Commit FPM fail");
	return status;
}

/**
 * i40iw_vf_wait_vchnl_resp - wait for channel msg
 * @dev: function's device struct
 */
enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev = dev->back_dev;
	int timeout_ret;

	i40iw_debug(dev, I40IW_DEBUG_VIRT, "%s[%u] dev %p, iwdev %p\n",
		    __func__, __LINE__, dev, iwdev);

	atomic_set(&iwdev->vchnl_msgs, 2);
	timeout_ret = wait_event_timeout(iwdev->vchnl_waitq,
					 (atomic_read(&iwdev->vchnl_msgs) == 1),
					 I40IW_VCHNL_EVENT_TIMEOUT);
	atomic_dec(&iwdev->vchnl_msgs);
	if (!timeout_ret) {
		i40iw_pr_err("virt channel completion timeout = 0x%x\n", timeout_ret);
		atomic_set(&iwdev->vchnl_msgs, 0);
		dev->vchnl_up = false;
		return I40IW_ERR_TIMEOUT;
	}
	wake_up(&dev->vf_reqs);
	return 0;
}

/**
 * i40iw_cqp_cq_create_cmd - create a cq for the cqp
 * @dev: device pointer
 * @cq: pointer to created cq
 */
enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev,
					       struct i40iw_sc_cq *cq)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Create CQ fail");

	return status;
}

/**
 * i40iw_cqp_qp_create_cmd - create a qp for the cqp
 * @dev: device pointer
 * @qp: pointer to created qp
 */
enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
					       struct i40iw_sc_qp *qp)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_create_qp_info *qp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;

	memset(qp_info, 0, sizeof(*qp_info));

	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;

	cqp_info->cqp_cmd = OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP QP create fail");
	return status;
}

/**
 * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
 * @dev: device pointer
 * @cq: pointer to cq
 */
void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_cq_wq_destroy(iwdev, cq);
}

/**
 * i40iw_cqp_qp_destroy_cmd - destroy the cqp
 * @dev: device pointer
 * @qp: pointer to qp
 */
void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	memset(cqp_info, 0, sizeof(*cqp_info));

	cqp_info->cqp_cmd = OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = qp;
	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP QP_DESTROY fail");
}

/**
 * i40iw_ieq_mpa_crc_ae - generate AE for crc error
 * @dev: hardware control device structure
 * @qp: hardware control qp
 */
void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_qp_flush_info info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_debug(dev, I40IW_DEBUG_AEQ, "%s entered\n", __func__);
	memset(&info, 0, sizeof(info));
	info.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR;
	info.generate_ae = true;
	info.ae_source = 0x3;
	(void)i40iw_hw_flush_wqes(iwdev, qp, &info, false);
}

/**
 * i40iw_init_hash_desc - initialize hash for crc calculation
 * @desc: double pointer to allocated shash descriptor
 */
enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc)
{
	struct crypto_shash *tfm;
	struct shash_desc *tdesc;

	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return I40IW_ERR_MPA_CRC;

	tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!tdesc) {
		crypto_free_shash(tfm);
		return I40IW_ERR_MPA_CRC;
	}
	tdesc->tfm = tfm;
	*desc = tdesc;

	return 0;
}

/**
 * i40iw_free_hash_desc - free hash desc
 * @desc: to be freed
 */
void i40iw_free_hash_desc(struct shash_desc *desc)
{
	if (desc) {
		crypto_free_shash(desc->tfm);
		kfree(desc);
	}
}

/**
 * i40iw_alloc_query_fpm_buf - allocate buffer for fpm
 * @dev: hardware control device structure
 * @mem: buffer ptr for fpm to be allocated
 * @return: memory allocation status
 */
enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
						 struct i40iw_dma_mem *mem)
{
	enum i40iw_status_code status;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	status = i40iw_obj_aligned_mem(iwdev, mem, I40IW_QUERY_FPM_BUF_SIZE,
				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
	return status;
}

/**
 * i40iw_ieq_check_mpacrc - check if mpa crc is OK
 * @desc: desc for hash
 * @addr: address of buffer for crc
 * @length: length of buffer
 * @value: value to be compared
 */
enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
					      void *addr,
					      u32 length,
					      u32 value)
{
	u32 crc = 0;
	int ret;
	enum i40iw_status_code ret_code = 0;

	crypto_shash_init(desc);
	ret = crypto_shash_update(desc, addr, length);
	if (!ret)
		crypto_shash_final(desc, (u8 *)&crc);
	if (crc != value) {
		i40iw_pr_err("mpa crc check fail\n");
		ret_code = I40IW_ERR_MPA_CRC;
	}
	return ret_code;
}
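
/*
 * Sketch of the intended lifecycle of the three crc helpers above
 * (hypothetical caller; the real users live in the puda/ieq code):
 *
 *	struct shash_desc *desc;
 *
 *	if (i40iw_init_hash_desc(&desc))
 *		return I40IW_ERR_MPA_CRC;
 *	...
 *	status = i40iw_ieq_check_mpacrc(desc, fpdu, len, received_crc);
 *	...
 *	i40iw_free_hash_desc(desc);
 */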

/**
 * i40iw_ieq_get_qp - get qp based on quad in puda buffer
 * @dev: hardware control device structure
 * @buf: receive puda buffer on exception q
 */
struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
				     struct i40iw_puda_buf *buf)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_qp *iwqp;
	struct i40iw_cm_node *cm_node;
	u32 loc_addr[4], rem_addr[4];
	u16 loc_port, rem_port;
	struct ipv6hdr *ip6h;
	struct iphdr *iph = (struct iphdr *)buf->iph;
	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;

	if (iph->version == 4) {
		memset(loc_addr, 0, sizeof(loc_addr));
		loc_addr[0] = ntohl(iph->daddr);
		memset(rem_addr, 0, sizeof(rem_addr));
		rem_addr[0] = ntohl(iph->saddr);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		i40iw_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
		i40iw_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
	}
	loc_port = ntohs(tcph->dest);
	rem_port = ntohs(tcph->source);

	cm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
				  loc_addr, false);
	if (!cm_node)
		return NULL;
	iwqp = cm_node->iwqp;
	return &iwqp->sc_qp;
}

/**
 * i40iw_ieq_update_tcpip_info - update tcpip in the buffer
 * @buf: puda to update
 * @length: length of buffer
 * @seqnum: seq number for tcp
 */
void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum)
{
	struct tcphdr *tcph;
	struct iphdr *iph;
	u16 iphlen;
	u16 packetsize;
	u8 *addr = (u8 *)buf->mem.va;

	iphlen = (buf->ipv4) ? 20 : 40;
	iph = (struct iphdr *)(addr + buf->maclen);
	tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
	packetsize = length + buf->tcphlen + iphlen;

	iph->tot_len = htons(packetsize);
	tcph->seq = htonl(seqnum);
}

/**
 * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer
 * @info: to get information
 * @buf: puda buffer
 */
enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
						 struct i40iw_puda_buf *buf)
{
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcphdr *tcph;
	u16 iphlen;
	u16 pkt_len;
	u8 *mem = (u8 *)buf->mem.va;
	struct ethhdr *ethh = (struct ethhdr *)buf->mem.va;

	if (ethh->h_proto == htons(ETH_P_8021Q)) {
		info->vlan_valid = true;
		buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK;
	}
	buf->maclen = (info->vlan_valid) ? 18 : 14;
	iphlen = (info->l3proto) ? 40 : 20;
	buf->ipv4 = (info->l3proto) ? false : true;
	buf->iph = mem + buf->maclen;
	iph = (struct iphdr *)buf->iph;

	buf->tcph = buf->iph + iphlen;
	tcph = (struct tcphdr *)buf->tcph;

	if (buf->ipv4) {
		pkt_len = ntohs(iph->tot_len);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		pkt_len = ntohs(ip6h->payload_len) + iphlen;
	}

	buf->totallen = pkt_len + buf->maclen;

	if (info->payload_len < buf->totallen) {
		i40iw_pr_err("payload_len = 0x%x totallen expected 0x%x\n",
			     info->payload_len, buf->totallen);
		return I40IW_ERR_INVALID_SIZE;
	}

	buf->tcphlen = (tcph->doff) << 2;
	buf->datalen = pkt_len - iphlen - buf->tcphlen;
	buf->data = (buf->datalen) ? buf->tcph + buf->tcphlen : NULL;
	buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
	buf->seqnum = ntohl(tcph->seq);
	return 0;
}

/**
 * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
 * @vsi: pointer to the vsi structure
 */
static void i40iw_hw_stats_timeout(unsigned long vsi)
{
	struct i40iw_sc_vsi *sc_vsi = (struct i40iw_sc_vsi *)vsi;
	struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
	struct i40iw_vsi_pestat *pf_devstat = sc_vsi->pestat;
	struct i40iw_vsi_pestat *vf_devstat = NULL;
	u16 iw_vf_idx;
	unsigned long flags;

	/*PF*/
	i40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats);

	for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
		spin_lock_irqsave(&pf_devstat->lock, flags);
		if (pf_dev->vf_dev[iw_vf_idx]) {
			if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
				vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat;
				i40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats);
			}
		}
		spin_unlock_irqrestore(&pf_devstat->lock, flags);
	}

	mod_timer(&pf_devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * i40iw_hw_stats_start_timer - Start periodic stats timer
 * @vsi: pointer to the vsi structure
 */
void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
{
	struct i40iw_vsi_pestat *devstat = vsi->pestat;

	setup_timer(&devstat->stats_timer, i40iw_hw_stats_timeout,
		    (unsigned long)vsi);
	mod_timer(&devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * i40iw_hw_stats_stop_timer - Delete periodic stats timer
 * @vsi: pointer to the vsi structure
 */
void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi)
{
	struct i40iw_vsi_pestat *devstat = vsi->pestat;

	del_timer_sync(&devstat->stats_timer);
}