/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/addrconf.h>

#include "i40iw.h"
#include "i40iw_register.h"
#include <net/netevent.h>
#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
#define CLIENT_IW_INTERFACE_VERSION_MINOR 01
#define CLIENT_IW_INTERFACE_VERSION_BUILD 00

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 5
#define DRV_VERSION_BUILD 123
#define DRV_VERSION	__stringify(DRV_VERSION_MAJOR) "."		\
	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)

static int push_mode;
module_param(push_mode, int, 0644);
MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled");

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");

static int resource_profile;
module_param(resource_profile, int, 0644);
MODULE_PARM_DESC(resource_profile,
		 "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");

static int max_rdma_vfs = 32;
module_param(max_rdma_vfs, int, 0644);
MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32, 32=default");
static int mpa_version = 2;
module_param(mpa_version, int, 0644);
MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp (1 or 2)");

MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";

static LIST_HEAD(i40iw_handlers);
static spinlock_t i40iw_handler_lock;

static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id, u8 *msg, u16 len);

static struct notifier_block i40iw_inetaddr_notifier = {
	.notifier_call = i40iw_inetaddr_event
};

static struct notifier_block i40iw_inetaddr6_notifier = {
	.notifier_call = i40iw_inet6addr_event
};

static struct notifier_block i40iw_net_notifier = {
	.notifier_call = i40iw_net_event
};

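/*
 * Number of device instances sharing the inetaddr/netevent notifiers;
 * the notifiers are only registered while at least one instance is up.
 */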
static int i40iw_notifiers_registered;

/* registered port mapper netlink callbacks */
static struct ibnl_client_cbs i40iw_nl_cb_table[] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
};

/**
 * i40iw_find_i40e_handler - find a handler given a client info
 * @ldev: pointer to a client info
 */
static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
{
	struct i40iw_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_for_each_entry(hdl, &i40iw_handlers, list) {
		if (hdl->ldev.netdev == ldev->netdev) {
			spin_unlock_irqrestore(&i40iw_handler_lock, flags);
			return hdl;
		}
	}
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return NULL;
}

/**
 * i40iw_find_netdev - find a handler given a netdev
 * @netdev: pointer to net_device
 */
struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
{
	struct i40iw_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_for_each_entry(hdl, &i40iw_handlers, list) {
		if (hdl->ldev.netdev == netdev) {
			spin_unlock_irqrestore(&i40iw_handler_lock, flags);
			return hdl;
		}
	}
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return NULL;
}

/**
 * i40iw_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 */
static void i40iw_add_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_add(&hdl->list, &i40iw_handlers);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
}

/**
 * i40iw_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 */
static int i40iw_del_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return 0;
}

/**
 * i40iw_enable_intr - set up device interrupts
 * @dev: hardware control device structure
 * @msix_id: id of the interrupt to be enabled
 */
static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
{
	u32 val;

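	/*
	 * Re-arm the vector: INTENA enables it, CLEARPBA clears the pending
	 * bit and ITR index 3 selects the no-throttling setting.  The
	 * DYN_CTLN register array starts at MSI-X vector 1, hence the
	 * msix_id - 1 indexing below.
	 */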
	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	      (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
}

/**
 * i40iw_dpc - tasklet for aeq and ceq 0
 * @data: iwarp device
 */
static void i40iw_dpc(unsigned long data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

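	/*
	 * On a shared MSI-X vector, CEQ 0 and the AEQ interrupt together,
	 * so drain both queues before re-enabling the interrupt.
	 */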
	if (iwdev->msix_shared)
		i40iw_process_ceq(iwdev, iwdev->ceqlist);
	i40iw_process_aeq(iwdev);
	i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
}

/**
 * i40iw_ceq_dpc - dpc handler for CEQ
 * @data: data points to CEQ
 */
static void i40iw_ceq_dpc(unsigned long data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
	struct i40iw_device *iwdev = iwceq->iwdev;

	i40iw_process_ceq(iwdev, iwceq);
	i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
}

/**
 * i40iw_irq_handler - interrupt handler for aeq and ceq0
 * @irq: Interrupt request number
 * @data: iwarp device
 */
static irqreturn_t i40iw_irq_handler(int irq, void *data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

	tasklet_schedule(&iwdev->dpc_tasklet);
	return IRQ_HANDLED;
}

/**
 * i40iw_destroy_cqp - destroy control qp
 * @iwdev: iwarp device
 * @free_hwcqp: true if the hardware CQP was created and should be destroyed
 *
 * Issue destroy cqp request and
 * free the resources associated with the cqp
 */
static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
{
	enum i40iw_status_code status = 0;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;

	if (free_hwcqp && dev->cqp_ops->cqp_destroy)
		status = dev->cqp_ops->cqp_destroy(dev->cqp);
	if (status)
		i40iw_pr_err("destroy cqp failed");

	i40iw_free_dma_mem(dev->hw, &cqp->sq);
	kfree(cqp->scratch_array);
	iwdev->cqp.scratch_array = NULL;

	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;
}

/**
 * i40iw_disable_irq - disable device interrupts
 * @dev: hardware control device structure
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
			      struct i40iw_msix_vector *msix_vec,
			      void *dev_id)
{
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
	synchronize_irq(msix_vec->irq);
	free_irq(msix_vec->irq, dev_id);
}

/**
 * i40iw_destroy_aeq - destroy aeq
 * @iwdev: iwarp device
 * @reset: true if called before reset
 *
 * Issue a destroy aeq request and
 * free the resources associated with the aeq
 * The function is called during driver unload
 */
static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
{
	enum i40iw_status_code status = I40IW_ERR_NOT_READY;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;

	if (!iwdev->msix_shared)
		i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
	if (reset)
		goto exit;

	if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
		status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
	if (status)
		i40iw_pr_err("destroy aeq failed %d\n", status);

exit:
	i40iw_free_dma_mem(dev->hw, &aeq->mem);
}

/**
 * i40iw_destroy_ceq - destroy ceq
 * @iwdev: iwarp device
 * @iwceq: ceq to be destroyed
 * @reset: true if called before reset
 *
 * Issue a destroy ceq request and
 * free the resources associated with the ceq
 */
static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
			      struct i40iw_ceq *iwceq,
			      bool reset)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	if (reset)
		goto exit;

	status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
	if (status) {
		i40iw_pr_err("ceq destroy command failed %d\n", status);
		goto exit;
	}

	status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
	if (status)
		i40iw_pr_err("ceq destroy completion failed %d\n", status);
exit:
	i40iw_free_dma_mem(dev->hw, &iwceq->mem);
}

/**
 * i40iw_dele_ceqs - destroy all ceq's
 * @iwdev: iwarp device
 * @reset: true if called before reset
 *
 * Go through all of the device ceq's and for each ceq
 * disable the ceq interrupt and destroy the ceq
 */
static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
{
	u32 i = 0;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ceq *iwceq = iwdev->ceqlist;
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;

	if (iwdev->msix_shared) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
		i40iw_destroy_ceq(iwdev, iwceq, reset);
		iwceq++;
		i++;
	}

	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
		i40iw_destroy_ceq(iwdev, iwceq, reset);
	}
}

/**
 * i40iw_destroy_ccq - destroy control cq
 * @iwdev: iwarp device
 * @reset: true if called before reset
 *
 * Issue destroy ccq request and
 * free the resources associated with the ccq
 */
static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ccq *ccq = &iwdev->ccq;
	enum i40iw_status_code status = 0;

	if (!reset)
		status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
	if (status)
		i40iw_pr_err("ccq destroy failed %d\n", status);
	i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
}

/* types of hmc objects */
static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
	I40IW_HMC_IW_QP,
	I40IW_HMC_IW_CQ,
	I40IW_HMC_IW_HTE,
	I40IW_HMC_IW_ARP,
	I40IW_HMC_IW_APBVT_ENTRY,
	I40IW_HMC_IW_MR,
	I40IW_HMC_IW_XF,
	I40IW_HMC_IW_XFFL,
	I40IW_HMC_IW_Q1,
	I40IW_HMC_IW_Q1FL,
	I40IW_HMC_IW_TIMER,
};

/**
 * i40iw_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: hardware control device structure
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: host memory info structure holding the hmc objects
 * @is_pf: true if the function is PF otherwise false
 * @reset: true if called before reset
 */
static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
					 enum i40iw_hmc_rsrc_type obj_type,
					 struct i40iw_hmc_info *hmc_info,
					 bool is_pf,
					 bool reset)
{
	struct i40iw_hmc_del_obj_info info;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.is_pf = is_pf;
	if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
		i40iw_pr_err("del obj of type %d failed\n", obj_type);
}

/**
 * i40iw_del_hmc_objects - remove all device hmc objects
 * @dev: iwarp device
 * @hmc_info: hmc_info to free
 * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
 *	   by PF on behalf of VF
 * @reset: true if called before reset
 */
static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
				  struct i40iw_hmc_info *hmc_info,
				  bool is_pf,
				  bool reset)
{
	unsigned int i;

	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
		i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
}

/**
 * i40iw_ceq_handler - interrupt handler for ceq
 * @irq: interrupt request number
 * @data: ceq pointer
 */
static irqreturn_t i40iw_ceq_handler(int irq, void *data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;

	if (iwceq->irq != irq)
		i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
	tasklet_schedule(&iwceq->dpc_tasklet);
	return IRQ_HANDLED;
}

/**
 * i40iw_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 */
static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_create_obj_info *info)
{
	return dev->hmc_ops->create_hmc_object(dev, info);
}

/**
 * i40iw_create_hmc_objs - create all hmc objects for the device
 * @iwdev: iwarp device
 * @is_pf: true if the function is PF otherwise false
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
						    bool is_pf)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_hmc_create_obj_info info;
	enum i40iw_status_code status;
	int i;

	memset(&info, 0, sizeof(info));
	info.hmc_info = dev->hmc_info;
	info.is_pf = is_pf;
	info.entry_type = iwdev->sd_type;
	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		info.rsrc_type = iw_hmc_obj_types[i];
		info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
		status = i40iw_create_hmc_obj_type(dev, &info);
		if (status) {
			i40iw_pr_err("create obj type %d status = %d\n",
				     iw_hmc_obj_types[i], status);
			break;
		}
	}
	if (!status)
		return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
								      dev->hmc_fn_id,
								      true, true));

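	/* on failure, unwind the object types created so far */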
	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		i40iw_close_hmc_objects_type(dev,
					     iw_hmc_obj_types[i],
					     dev->hmc_info,
					     is_pf,
					     false);
	}
	return status;
}

/**
 * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
 * @iwdev: iwarp device
 * @memptr: points to the memory addresses
 * @size: size of memory needed
 * @mask: mask for the aligned memory
 *
 * Get aligned memory of the requested size and
 * update the memptr to point to the new aligned memory
 * Return 0 if successful, otherwise return no memory error
 */
enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
					     struct i40iw_dma_mem *memptr,
					     u32 size,
					     u32 mask)
{
	unsigned long va, newva;
	unsigned long extra;

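	/*
	 * mask is alignment - 1, e.g. mask = 0x3f carves out a 64-byte
	 * aligned block; the same offset is applied to both the virtual
	 * and the physical address so the two stay in sync.
	 */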
	va = (unsigned long)iwdev->obj_next.va;
	newva = va;
	if (mask)
		newva = ALIGN(va, (mask + 1));
	extra = newva - va;
	memptr->va = (u8 *)va + extra;
	memptr->pa = iwdev->obj_next.pa + extra;
	memptr->size = size;
	if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
		return I40IW_ERR_NO_MEMORY;

	iwdev->obj_next.va = memptr->va + size;
	iwdev->obj_next.pa = memptr->pa + size;
	return 0;
}

/**
 * i40iw_create_cqp - create control qp
 * @iwdev: iwarp device
 *
 * Return 0, if the cqp and all the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
	struct i40iw_dma_mem mem;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp_init_info cqp_init_info;
	struct i40iw_cqp *cqp = &iwdev->cqp;
	u16 maj_err, min_err;
	int i;

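	/*
	 * cqp_requests is the pool of request tracking structures and
	 * scratch_array maps a completed CQP WQE back to its originating
	 * request.
	 */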
	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
	if (!cqp->cqp_requests)
		return I40IW_ERR_NO_MEMORY;
	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
	if (!cqp->scratch_array) {
		kfree(cqp->cqp_requests);
		return I40IW_ERR_NO_MEMORY;
	}
	dev->cqp = &cqp->sc_cqp;
	dev->cqp->dev = dev;
	memset(&cqp_init_info, 0, sizeof(cqp_init_info));
	status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
					(sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
					I40IW_CQP_ALIGNMENT);
	if (status)
		goto exit;
	status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
				       I40IW_HOST_CTX_ALIGNMENT_MASK);
	if (status)
		goto exit;
	dev->cqp->host_ctx_pa = mem.pa;
	dev->cqp->host_ctx = mem.va;
	/* populate the cqp init info */
	cqp_init_info.dev = dev;
	cqp_init_info.sq_size = sqsize;
	cqp_init_info.sq = cqp->sq.va;
	cqp_init_info.sq_pa = cqp->sq.pa;
	cqp_init_info.host_ctx_pa = mem.pa;
	cqp_init_info.host_ctx = mem.va;
	cqp_init_info.hmc_profile = iwdev->resource_profile;
	cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
	cqp_init_info.scratch_array = cqp->scratch_array;
	status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
	if (status) {
		i40iw_pr_err("cqp init status %d\n", status);
		goto exit;
	}
	status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
	if (status) {
		i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
			     status, maj_err, min_err);
		goto exit;
	}
	spin_lock_init(&cqp->req_lock);
	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
	/* init the waitq of the cqp_requests and add them to the list */
	for (i = 0; i < sqsize; i++) {
		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
	}
	return 0;
exit:
	/* clean up the created resources */
	i40iw_destroy_cqp(iwdev, false);
	return status;
}

/**
 * i40iw_create_ccq - create control cq
 * @iwdev: iwarp device
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_dma_mem mem;
	enum i40iw_status_code status;
	struct i40iw_ccq_init_info info;
	struct i40iw_ccq *ccq = &iwdev->ccq;

	memset(&info, 0, sizeof(info));
	dev->ccq = &ccq->sc_cq;
	dev->ccq->dev = dev;
	info.dev = dev;
	ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
	ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
	status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
					ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
	if (status)
		goto exit;
	status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
				       I40IW_SHADOWAREA_MASK);
	if (status)
		goto exit;
	ccq->sc_cq.back_cq = (void *)ccq;
	/* populate the ccq init info */
	info.cq_base = ccq->mem_cq.va;
	info.cq_pa = ccq->mem_cq.pa;
	info.num_elem = IW_CCQ_SIZE;
	info.shadow_area = mem.va;
	info.shadow_area_pa = mem.pa;
	info.ceqe_mask = false;
	info.ceq_id_valid = true;
	info.shadow_read_threshold = 16;
	status = dev->ccq_ops->ccq_init(dev->ccq, &info);
	if (!status)
		status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
	return status;
}

/**
 * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
 * @iwdev: iwarp device
 * @iwceq: ceq associated with the vector
 * @ceq_id: the id number of the iwceq
 * @msix_vec: interrupt vector information
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
							 struct i40iw_ceq *iwceq,
							 u32 ceq_id,
							 struct i40iw_msix_vector *msix_vec)
{
	enum i40iw_status_code status;

	if (iwdev->msix_shared && !ceq_id) {
		tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
		status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
	} else {
		tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
		status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
	}

	if (status) {
		i40iw_pr_err("ceq irq config fail\n");
		return I40IW_ERR_CONFIG;
	}
	msix_vec->ceq_id = ceq_id;
	msix_vec->cpu_affinity = 0;

	return 0;
}

/**
 * i40iw_create_ceq - create completion event queue
 * @iwdev: iwarp device
 * @iwceq: pointer to the ceq resources to be created
 * @ceq_id: the id number of the iwceq
 *
 * Return 0, if the ceq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
					       struct i40iw_ceq *iwceq,
					       u32 ceq_id)
{
	enum i40iw_status_code status;
	struct i40iw_ceq_init_info info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	u64 scratch;

	memset(&info, 0, sizeof(info));
	info.ceq_id = ceq_id;
	iwceq->iwdev = iwdev;
	iwceq->mem.size = sizeof(struct i40iw_ceqe) *
		iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
					I40IW_CEQ_ALIGNMENT);
	if (status)
		goto exit;
	info.ceqe_base = iwceq->mem.va;
	info.ceqe_pa = iwceq->mem.pa;

	info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	iwceq->sc_ceq.ceq_id = ceq_id;
	info.dev = dev;
	scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
	status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
	if (!status)
		status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);

exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &iwceq->mem);
	return status;
}

void i40iw_request_reset(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;

	ldev->ops->request_reset(ldev, iwdev->client, 1);
}

/**
 * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
 * @iwdev: iwarp device
 * @ldev: i40e lan device
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if at least one ceq is successfully set up, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
					       struct i40e_info *ldev)
{
	u32 i;
	u32 ceq_id;
	struct i40iw_ceq *iwceq;
	struct i40iw_msix_vector *msix_vec;
	enum i40iw_status_code status = 0;
	u32 num_ceqs;

	if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
		status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
						 iwdev->iw_qvlist);
		if (status)
			goto exit;
	} else {
		status = I40IW_ERR_BAD_PTR;
		goto exit;
	}

	num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
	iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
	if (!iwdev->ceqlist) {
		status = I40IW_ERR_NO_MEMORY;
		goto exit;
	}
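	/*
	 * With a shared vector, CEQ 0 rides on MSI-X entry 0 together with
	 * the AEQ; otherwise entry 0 is left to the AEQ alone and the CEQs
	 * start at entry 1.
	 */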
	i = (iwdev->msix_shared) ? 0 : 1;
	for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
		iwceq = &iwdev->ceqlist[ceq_id];
		status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
		if (status) {
			i40iw_pr_err("create ceq status = %d\n", status);
			break;
		}

		msix_vec = &iwdev->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
		if (status) {
			i40iw_destroy_ceq(iwdev, iwceq, false);
			break;
		}
		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
		iwdev->ceqs_count++;
	}

exit:
	if (status) {
		if (!iwdev->ceqs_count) {
			kfree(iwdev->ceqlist);
			iwdev->ceqlist = NULL;
		} else {
			status = 0;
		}
	}
	return status;
}

/**
 * i40iw_configure_aeq_vector - set up the msix vector for aeq
 * @iwdev: iwarp device
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
{
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
	int ret = 0;

	if (!iwdev->msix_shared) {
		tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
		ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
	}
	if (ret) {
		i40iw_pr_err("aeq irq config fail\n");
		return I40IW_ERR_CONFIG;
	}

	return 0;
}

/**
 * i40iw_create_aeq - create async event queue
 * @iwdev: iwarp device
 *
 * Return 0, if the aeq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	struct i40iw_aeq_init_info info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;
	u64 scratch = 0;
	u32 aeq_size;

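	/* the AEQ is sized for two events per QP plus one per CQ */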
	aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
		iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	memset(&info, 0, sizeof(info));
	aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
	status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
					I40IW_AEQ_ALIGNMENT);
	if (status)
		goto exit;

	info.aeqe_base = aeq->mem.va;
	info.aeq_elem_pa = aeq->mem.pa;
	info.elem_cnt = aeq_size;
	info.dev = dev;
	status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
	if (status)
		goto exit;
	status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
	if (!status)
		status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &aeq->mem);
	return status;
}

/**
 * i40iw_setup_aeq - set up the device aeq
 * @iwdev: iwarp device
 *
 * Create the aeq and configure its msix interrupt vector
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	status = i40iw_create_aeq(iwdev);
	if (status)
		return status;

	status = i40iw_configure_aeq_vector(iwdev);
	if (status) {
		i40iw_destroy_aeq(iwdev, false);
		return status;
	}

	if (!iwdev->msix_shared)
		i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
	return 0;
}

/**
 * i40iw_initialize_ilq - create iwarp local queue for cm
 * @iwdev: iwarp device
 *
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

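	/*
	 * The ILQ is the puda resource the connection manager uses to
	 * send and receive connection-setup packets.
	 */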
	info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
	info.cq_id = 1;
	info.qp_id = 0;
	info.count = 1;
	info.pd_id = 1;
	info.sq_size = 8192;
	info.rq_size = 8192;
	info.buf_size = 1024;
	info.tx_buf_cnt = 16384;
	info.mss = iwdev->mss;
	info.receive = i40iw_receive_ilq;
	info.xmit_complete = i40iw_free_sqbuf;
	status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
	if (status)
		i40iw_pr_err("ilq create fail\n");
	return status;
}

/**
 * i40iw_initialize_ieq - create iwarp exception queue
 * @iwdev: iwarp device
 *
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

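	/*
	 * The IEQ handles the exception path, i.e. traffic routed to the
	 * exception_lan_queue that the hardware could not place directly.
	 */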
	/* zero the info first: unlike the ILQ, no receive/xmit callbacks are set */
	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
	info.cq_id = 2;
	info.qp_id = iwdev->sc_dev.exception_lan_queue;
	info.count = 1;
	info.pd_id = 2;
	info.sq_size = 8192;
	info.rq_size = 8192;
	info.buf_size = 2048;
	info.mss = iwdev->mss;
	info.tx_buf_cnt = 16384;
	status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
	if (status)
		i40iw_pr_err("ieq create fail\n");
	return status;
}

/**
 * i40iw_hmc_setup - create hmc objects for the device
 * @iwdev: iwarp device
 *
 * Set up the device private memory space for the number and size of
 * the hmc objects and create the objects
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;

	iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
	status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
	if (status)
		goto exit;
	status = i40iw_create_hmc_objs(iwdev, true);
	if (status)
		goto exit;
	iwdev->init_state = HMC_OBJS_CREATED;
exit:
	return status;
}

/**
 * i40iw_del_init_mem - deallocate memory resources
 * @iwdev: iwarp device
 */
static void i40iw_del_init_mem(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
	kfree(dev->hmc_info->sd_table.sd_entry);
	dev->hmc_info->sd_table.sd_entry = NULL;
	kfree(iwdev->mem_resources);
	iwdev->mem_resources = NULL;
	kfree(iwdev->ceqlist);
	iwdev->ceqlist = NULL;
	kfree(iwdev->iw_msixtbl);
	iwdev->iw_msixtbl = NULL;
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
}

/**
 * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
 * @iwdev: iwarp device
 * @idx: the index of the mac ip address to delete
 */
static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return;
	}
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
	cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
}

/**
 * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
 * @iwdev: iwarp device
 * @mac_addr: pointer to mac address
 * @idx: the index of the mac ip address to add
 */
static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
							 u8 *mac_addr,
							 u8 idx)
{
	struct i40iw_local_mac_ipaddr_entry_info *info;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	cqp_info = &cqp_request->info;

	cqp_info->post_sq = 1;
	info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
	ether_addr_copy(info->mac_addr, mac_addr);
	info->entry_idx = idx;
	cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
	return status;
}

/**
 * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
 * @iwdev: iwarp device
 * @mac_ip_tbl_idx: the index of the new mac ip address
 *
 * Allocate a mac ip address entry and update the mac_ip_tbl_idx
 * to hold the index of the newly created mac ip address
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
								 u16 *mac_ip_tbl_idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	/* increment refcount, because we need the cqp request ret value */
	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		*mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
	/* decrement refcount and free the cqp request, if no longer used */
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return status;
}

/**
 * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
 * @iwdev: iwarp device
 * @macaddr: pointer to mac address
 *
 * Allocate a mac ip address entry and add it to the hw table
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
							 u8 *macaddr)
{
	enum i40iw_status_code status;

	status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
	if (!status) {
		status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
						    (u8)iwdev->mac_ip_table_idx);
		if (status)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
	}
	return status;
}

/**
 * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: iwarp device
 */
static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp;
	__be32 local_ipaddr6[4];

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("ipv6 inet device not found\n");
				break;
			}
			list_for_each_entry(ifp, &idev->addr_list, if_list) {
				i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
					      rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
				i40iw_copy_ip_ntohl(local_ipaddr6,
						    ifp->addr.in6_u.u6_addr32);
				i40iw_manage_arp_cache(iwdev,
						       ip_dev->dev_addr,
						       local_ipaddr6,
						       false,
						       I40IW_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}

/**
 * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: iwarp device
 */
static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
{
	struct net_device *dev;
	struct in_device *idev;
	bool got_lock = true;
	u32 ip_addr;

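	/*
	 * Try for rtnl but do not insist: this path can run while another
	 * context already holds the lock, in which case the netdev list is
	 * walked without it.
	 */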
	if (!rtnl_trylock())
		got_lock = false;

	for_each_netdev(&init_net, dev) {
		if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
		     (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
			idev = in_dev_get(dev);
			for_ifa(idev) {
				i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
					    "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
					    rdma_vlan_dev_vlan_id(dev), dev->dev_addr);

				ip_addr = ntohl(ifa->ifa_address);
				i40iw_manage_arp_cache(iwdev,
						       dev->dev_addr,
						       &ip_addr,
						       true,
						       I40IW_ARP_ADD);
			}
			endfor_ifa(idev);
			in_dev_put(idev);
		}
	}
	if (got_lock)
		rtnl_unlock();
}

/**
 * i40iw_add_mac_ip - add mac and ip addresses
 * @iwdev: iwarp device
 *
 * Create and add a mac ip address entry to the hw table and
 * ipv4/ipv6 addresses to the arp cache
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
{
	struct net_device *netdev = iwdev->netdev;
	enum i40iw_status_code status;

	status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
	if (status)
		return status;
	i40iw_add_ipv4_addr(iwdev);
	i40iw_add_ipv6_addr(iwdev);
	return 0;
}

/**
 * i40iw_wait_pe_ready - Check if firmware is ready
 * @hw: provides access to registers
 */
static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
{
	u32 statusfw;
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

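	/*
	 * Poll the firmware load and protocol-engine CPU status registers
	 * until all three CPUs report ready (0x80), retrying for up to
	 * 14 seconds.
	 */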
	do {
		statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
		i40iw_pr_info("[%04d] fw load status[x%04X]\n", __LINE__, statusfw);
		statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
		i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
		statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
			      __LINE__, statuscpu1);
		statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
			      __LINE__, statuscpu2);
		if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
			break;	/* SUCCESS */
		mdelay(1000);
		retrycount++;
	} while (retrycount < 14);
	i40iw_wr32(hw, 0xb4040, 0x4C104C5);
}

/**
 * i40iw_initialize_dev - initialize device
 * @iwdev: iwarp device
 * @ldev: lan device information
 *
 * Allocate memory for the hmc objects and initialize iwdev
 * Return 0 if successful, otherwise clean up the resources
 * and return error
 */
static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
						   struct i40e_info *ldev)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_device_init_info info;
	struct i40iw_dma_mem mem;
	u32 size;

	memset(&info, 0, sizeof(info));
	size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
	       (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
	iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
	if (!iwdev->hmc_info_mem) {
		i40iw_pr_err("memory alloc fail\n");
		return I40IW_ERR_NO_MEMORY;
	}
	iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
	dev->hmc_info = &iwdev->hw.hmc;
	dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
	if (status)
		goto exit;
	info.fpm_query_buf_pa = mem.pa;
	info.fpm_query_buf = mem.va;
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
				       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
	if (status)
		goto exit;
	info.fpm_commit_buf_pa = mem.pa;
	info.fpm_commit_buf = mem.va;
	info.hmc_fn_id = ldev->fid;
	info.is_pf = (ldev->ftype) ? false : true;
	info.bar0 = ldev->hw_addr;
	info.hw = &iwdev->hw;
	info.debug_mask = debug;
	info.qs_handle = ldev->params.qos.prio_qos[0].qs_handle;
	info.exception_lan_queue = 1;
	info.vchnl_send = i40iw_virtchnl_send;
	status = i40iw_device_init(&iwdev->sc_dev, &info);
exit:
	if (status) {
		kfree(iwdev->hmc_info_mem);
		iwdev->hmc_info_mem = NULL;
	}
	return status;
}

/**
 * i40iw_register_notifiers - register tcp ip notifiers
 */
static void i40iw_register_notifiers(void)
{
	if (!i40iw_notifiers_registered) {
		register_inetaddr_notifier(&i40iw_inetaddr_notifier);
		register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
		register_netevent_notifier(&i40iw_net_notifier);
	}
	i40iw_notifiers_registered++;
}

/**
 * i40iw_save_msix_info - copy msix vector information to iwarp device
 * @iwdev: iwarp device
 * @ldev: lan device information
 *
 * Allocate iwdev msix table and copy the ldev msix info to the table
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
						   struct i40e_info *ldev)
{
	struct i40e_qvlist_info *iw_qvlist;
	struct i40e_qv_info *iw_qvinfo;
	u32 ceq_idx;
	u32 i;
	u32 size;

	iwdev->msix_count = ldev->msix_count;

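	/*
	 * One allocation holds the msix table followed by the qvlist info;
	 * struct i40e_qvlist_info already embeds one i40e_qv_info, hence
	 * only msix_count - 1 extra entries are needed.
	 */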
	size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
	size += sizeof(struct i40e_qvlist_info);
	size += sizeof(struct i40e_qv_info) * (iwdev->msix_count - 1);
	iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);

	if (!iwdev->iw_msixtbl)
		return I40IW_ERR_NO_MEMORY;
	iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
	iw_qvlist = iwdev->iw_qvlist;
	iw_qvinfo = iw_qvlist->qv_info;
	iw_qvlist->num_vectors = iwdev->msix_count;
	if (iwdev->msix_count <= num_online_cpus())
		iwdev->msix_shared = true;
	for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
		iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
		iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
		if (i == 0) {
			iw_qvinfo->aeq_idx = 0;
			if (iwdev->msix_shared)
				iw_qvinfo->ceq_idx = ceq_idx++;
			else
				iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
		} else {
			iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
			iw_qvinfo->ceq_idx = ceq_idx++;
		}
		iw_qvinfo->itr_idx = 3;
		iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
	}
	return 0;
}

/**
 * i40iw_deinit_device - clean up the device resources
 * @iwdev: iwarp device
 * @reset: true if called before reset
 * @del_hdl: true if delete hdl entry
 *
 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
 * destroy the device queues and free the pble and the hmc objects
 */
static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del_hdl)
{
	struct i40e_info *ldev = iwdev->ldev;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_pr_info("state = %d\n", iwdev->init_state);

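	/*
	 * Teardown resumes at the last completed init stage; each case
	 * falls through so resources unwind in reverse order of creation.
	 */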
	switch (iwdev->init_state) {
	case RDMA_DEV_REGISTERED:
		iwdev->iw_status = 0;
		i40iw_port_ibevent(iwdev);
		i40iw_destroy_rdma_device(iwdev->iwibdev);
		/* fallthrough */
	case IP_ADDR_REGISTERED:
		if (!reset)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
		/* fallthrough */
	case INET_NOTIFIER:
		if (i40iw_notifiers_registered > 0) {
			i40iw_notifiers_registered--;
			unregister_netevent_notifier(&i40iw_net_notifier);
			unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
			unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
		}
		/* fallthrough */
	case CEQ_CREATED:
		i40iw_dele_ceqs(iwdev, reset);
		/* fallthrough */
	case AEQ_CREATED:
		i40iw_destroy_aeq(iwdev, reset);
		/* fallthrough */
	case IEQ_CREATED:
		i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
		/* fallthrough */
	case ILQ_CREATED:
		i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
		/* fallthrough */
	case CCQ_CREATED:
		i40iw_destroy_ccq(iwdev, reset);
		/* fallthrough */
	case PBLE_CHUNK_MEM:
		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
		/* fallthrough */
	case HMC_OBJS_CREATED:
		i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
		/* fallthrough */
	case CQP_CREATED:
		i40iw_destroy_cqp(iwdev, !reset);
		/* fallthrough */
	case INITIAL_STATE:
		i40iw_cleanup_cm_core(&iwdev->cm_core);
		if (dev->is_pf)
			i40iw_hw_stats_del_timer(dev);

		i40iw_del_init_mem(iwdev);
		break;
	case INVALID_STATE:
		/* fallthrough */
	default:
		i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
		break;
	}

	if (del_hdl)
		i40iw_del_handler(i40iw_find_i40e_handler(ldev));
	kfree(iwdev->hdl);
}

/**
 * i40iw_setup_init_state - set up the initial device struct
 * @hdl: handler for iwarp device - one per instance
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Initialize the iwarp device and its hdl information
 * using the ldev and client information
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
						     struct i40e_info *ldev,
						     struct i40e_client *client)
{
	struct i40iw_device *iwdev = &hdl->device;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	memcpy(&hdl->ldev, ldev, sizeof(*ldev));
	if (resource_profile == 1)
		resource_profile = 2;

	iwdev->mpa_version = mpa_version;
	iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
	    (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
	    I40IW_HMC_PROFILE_DEFAULT;
	iwdev->max_rdma_vfs =
		(iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
	iwdev->netdev = ldev->netdev;
	hdl->client = client;
	iwdev->mss = (!ldev->params.mtu) ? I40IW_DEFAULT_MSS : ldev->params.mtu - I40IW_MTU_TO_MSS;
	if (!ldev->ftype)
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
	else
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;

	status = i40iw_save_msix_info(iwdev, ldev);
	if (status)
		goto exit;
	iwdev->hw.dev_context = (void *)ldev->pcidev;
	iwdev->hw.hw_addr = ldev->hw_addr;
	status = i40iw_allocate_dma_mem(&iwdev->hw,
					&iwdev->obj_mem, 8192, 4096);
	if (status)
		goto exit;
	iwdev->obj_next = iwdev->obj_mem;
	iwdev->push_mode = push_mode;
	init_waitqueue_head(&iwdev->vchnl_waitq);
	status = i40iw_initialize_dev(iwdev, ldev);
exit:
	if (status) {
		kfree(iwdev->iw_msixtbl);
		i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
		iwdev->iw_msixtbl = NULL;
	}
	return status;
}

/**
 * i40iw_open - client interface operation open for iwarp/uda device
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Called by the lan driver during the processing of client register
 * Create device resources, set up queues, pble and hmc objects and
 * register the device with the ib verbs interface
 * Return 0 if successful, otherwise return error
 */
static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
{
	struct i40iw_device *iwdev;
	struct i40iw_sc_dev *dev;
	enum i40iw_status_code status;
	struct i40iw_handler *hdl;

	hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
	if (!hdl)
		return -ENOMEM;
	iwdev = &hdl->device;
	iwdev->hdl = hdl;
	dev = &iwdev->sc_dev;
	i40iw_setup_cm_core(iwdev);

	dev->back_dev = (void *)iwdev;
	iwdev->ldev = &hdl->ldev;
	iwdev->client = client;
	mutex_init(&iwdev->pbl_mutex);
	i40iw_add_handler(hdl);

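	/*
	 * Each step records its stage in init_state so that on failure the
	 * common exit path can unwind exactly what was created; the
	 * do/while (0) lets any step break out to that path.
	 */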
	do {
		status = i40iw_setup_init_state(hdl, ldev, client);
		if (status)
			break;
		iwdev->init_state = INITIAL_STATE;
		if (dev->is_pf)
			i40iw_wait_pe_ready(dev->hw);
		status = i40iw_create_cqp(iwdev);
		if (status)
			break;
		iwdev->init_state = CQP_CREATED;
		status = i40iw_hmc_setup(iwdev);
		if (status)
			break;
		status = i40iw_create_ccq(iwdev);
		if (status)
			break;
		iwdev->init_state = CCQ_CREATED;
		status = i40iw_initialize_ilq(iwdev);
		if (status)
			break;
		iwdev->init_state = ILQ_CREATED;
		status = i40iw_initialize_ieq(iwdev);
		if (status)
			break;
		iwdev->init_state = IEQ_CREATED;
		status = i40iw_setup_aeq(iwdev);
		if (status)
			break;
		iwdev->init_state = AEQ_CREATED;
		status = i40iw_setup_ceqs(iwdev, ldev);
		if (status)
			break;
		iwdev->init_state = CEQ_CREATED;
		status = i40iw_initialize_hw_resources(iwdev);
		if (status)
			break;
		dev->ccq_ops->ccq_arm(dev->ccq);
		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
		if (status)
			break;
		iwdev->virtchnl_wq = create_singlethread_workqueue("iwvch");
		i40iw_register_notifiers();
		iwdev->init_state = INET_NOTIFIER;
		status = i40iw_add_mac_ip(iwdev);
		if (status)
			break;
		iwdev->init_state = IP_ADDR_REGISTERED;
		if (i40iw_register_rdma_device(iwdev)) {
			i40iw_pr_err("register rdma device fail\n");
			break;
		}

		iwdev->init_state = RDMA_DEV_REGISTERED;
		iwdev->iw_status = 1;
		i40iw_port_ibevent(iwdev);
		i40iw_pr_info("i40iw_open completed\n");
		return 0;
	} while (0);

	i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
	i40iw_deinit_device(iwdev, false, false);
	return -ERESTART;
}

/**
 * i40iw_l2param_change - handle qs handles for qos and mss change
 * @ldev: lan device information
 * @client: client for parameter change
 * @params: new parameters from L2
 */
static void i40iw_l2param_change(struct i40e_info *ldev,
				 struct i40e_client *client,
				 struct i40e_params *params)
{
	struct i40iw_handler *hdl;
	struct i40iw_device *iwdev;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;
	if (params->mtu)
		iwdev->mss = params->mtu - I40IW_MTU_TO_MSS;
}

/**
 * i40iw_close - client interface operation close for iwarp/uda device
 * @ldev: lan device information
 * @client: client to close
 * @reset: true if called before reset
 *
 * Called by the lan driver during the processing of client unregister
 * Destroy and clean up the driver resources
 */
static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
{
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;
	destroy_workqueue(iwdev->virtchnl_wq);
	i40iw_deinit_device(iwdev, reset, true);
}

/**
 * i40iw_vf_reset - process VF reset
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Called when a VF is reset by the PF
 * Destroy and clean up the VF resources
 */
static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_vfdev *tmp_vfdev;
	unsigned int i;
	unsigned long flags;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	dev = &hdl->device.sc_dev;

	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
			continue;

		/* free all resources allocated on behalf of vf */
		tmp_vfdev = dev->vf_dev[i];
		spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
		dev->vf_dev[i] = NULL;
		spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
		i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
		/* remove vf hmc function */
		memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
		hmc_fcn_info.vf_id = vf_id;
		hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
		hmc_fcn_info.free_fcn = true;
		i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
		/* free vf_dev */
		vf_dev_mem.va = tmp_vfdev;
		vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
				  sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
		break;
	}
}

/**
 * i40iw_vf_enable - enable a number of VFs
 * @ldev: lan device information
 * @client: client interface instance
 * @num_vfs: number of VFs for the PF
 *
 * Called when the number of VFs changes
 */
static void i40iw_vf_enable(struct i40e_info *ldev,
			    struct i40e_client *client,
			    u32 num_vfs)
{
	struct i40iw_handler *hdl;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
		hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
	else
		hdl->device.max_enabled_vfs = num_vfs;
}

/**
 * i40iw_vf_capable - check if VF capable
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Return 1 if a VF slot is available or if VF is already RDMA enabled
 * Return 0 otherwise
 */
static int i40iw_vf_capable(struct i40e_info *ldev,
			    struct i40e_client *client,
			    u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	unsigned int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return 0;

	dev = &hdl->device.sc_dev;

	for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
			return 1;
	}

	return 0;
}

/**
 * i40iw_virtchnl_receive - receive a message through the virtual channel
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id associated with the message
 * @msg: message buffer pointer
 * @len: length of the message
 *
 * Invoke virtual channel receive operation for the given msg
 * Return 0 if successful, otherwise return error
 */
static int i40iw_virtchnl_receive(struct i40e_info *ldev,
				  struct i40e_client *client,
				  u32 vf_id,
				  u8 *msg,
				  u16 len)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_device *iwdev;
	int ret_code = I40IW_NOT_SUPPORTED;

	if (!len || !msg)
		return I40IW_ERR_PARAM;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return I40IW_ERR_PARAM;

	dev = &hdl->device.sc_dev;
	iwdev = dev->back_dev;

	i40iw_debug(dev, I40IW_DEBUG_VIRT, "msg %p, message length %u\n", msg, len);

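	/*
	 * On a VF, a sender is blocked in vchnl_waitq until the PF replies;
	 * account for this message and wake the waiter.
	 */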
	if (dev->vchnl_if.vchnl_recv) {
		ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
		if (!dev->is_pf) {
			atomic_dec(&iwdev->vchnl_msgs);
			wake_up(&iwdev->vchnl_waitq);
		}
	}
	return ret_code;
}

/**
 * i40iw_virtchnl_send - send a message through the virtual channel
 * @dev: iwarp device
 * @vf_id: virtual function id associated with the message
 * @msg: virtual channel message buffer pointer
 * @len: length of the message
 *
 * Invoke virtual channel send operation for the given msg
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id,
						  u8 *msg,
						  u16 len)
{
	struct i40iw_device *iwdev;
	struct i40e_info *ldev;
	enum i40iw_status_code ret_code = I40IW_ERR_BAD_PTR;

	if (!dev || !dev->back_dev)
		return ret_code;

	iwdev = dev->back_dev;
	ldev = iwdev->ldev;

	if (ldev && ldev->ops && ldev->ops->virtchnl_send)
		ret_code = ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);

	return ret_code;
}

/* client interface functions */
static struct i40e_client_ops i40e_ops = {
	.open = i40iw_open,
	.close = i40iw_close,
	.l2_param_change = i40iw_l2param_change,
	.virtchnl_receive = i40iw_virtchnl_receive,
	.vf_reset = i40iw_vf_reset,
	.vf_enable = i40iw_vf_enable,
	.vf_capable = i40iw_vf_capable
};

/**
 * i40iw_init_module - driver initialization function
 *
 * First function to call when the driver is loaded
 * Register the driver as i40e client and port mapper client
 */
static int __init i40iw_init_module(void)
{
	int ret;

	memset(&i40iw_client, 0, sizeof(i40iw_client));
	i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
	i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
	i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
	i40iw_client.ops = &i40e_ops;
	memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
	i40iw_client.type = I40E_CLIENT_IWARP;
	spin_lock_init(&i40iw_handler_lock);
	ret = i40e_register_client(&i40iw_client);
	if (ret)
		return ret;
	ret = iwpm_init(RDMA_NL_I40IW);
	if (ret)
		i40iw_pr_err("Port mapper initialization failed\n");
	ret = ibnl_add_client(RDMA_NL_I40IW, RDMA_NL_IWPM_NUM_OPS,
			      i40iw_nl_cb_table);
	if (ret)
		i40iw_pr_err("Failed to add netlink callback\n");
	return ret;
}

/**
 * i40iw_exit_module - driver exit clean up function
 *
 * The function is called just before the driver is unloaded
 * Unregister the driver as i40e client and port mapper client
 */
static void __exit i40iw_exit_module(void)
{
	i40e_unregister_client(&i40iw_client);
	ibnl_remove_client(RDMA_NL_I40IW);
	iwpm_exit(RDMA_NL_I40IW);
}

module_init(i40iw_init_module);
module_exit(i40iw_exit_module);