/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

/* API common to all protocols */
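/* Propagate the caller-supplied debug module mask and verbosity level to
 * the device structure and to every hw-function it contains, so the DP_*
 * logging macros see a consistent configuration everywhere.
 */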
void qed_init_dp(struct qed_dev *cdev,
		 u32 dp_module, u8 dp_level)
{
	u32 i;

	cdev->dp_level = dp_level;
	cdev->dp_module = dp_module;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
	}
}

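/* One-time initialization of the qed_dev structure: link each hw-function
 * back to its parent device, assign per-hwfn ids, init the DMAE mutexes,
 * mark hwfn 0 active and default the cache shift to 7 (128-byte lines).
 */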
void qed_init_struct(struct qed_dev *cdev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->cdev = cdev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

		mutex_init(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	cdev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 */
	cdev->cache_shift = 7;
}

static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	kfree(qm_info->qm_pq_params);
	qm_info->qm_pq_params = NULL;
	kfree(qm_info->qm_vport_params);
	qm_info->qm_vport_params = NULL;
	kfree(qm_info->qm_port_params);
	qm_info->qm_port_params = NULL;
}

void qed_resc_free(struct qed_dev *cdev)
{
	int i;

	kfree(cdev->fw_data);
	cdev->fw_data = NULL;

	kfree(cdev->reset_stats);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		kfree(p_hwfn->p_tx_cids);
		p_hwfn->p_tx_cids = NULL;
		kfree(p_hwfn->p_rx_cids);
		p_hwfn->p_rx_cids = NULL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_free(p_hwfn);
		qed_qm_info_free(p_hwfn);
		qed_spq_free(p_hwfn);
		qed_eq_free(p_hwfn, p_hwfn->p_eq);
		qed_consq_free(p_hwfn, p_hwfn->p_consq);
		qed_int_free(p_hwfn);
		qed_dmae_info_free(p_hwfn);
	}
}

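/* Build the per-hwfn queue-manager (QM) configuration: one physical queue
 * (PQ) per traffic class for L2 traffic plus one pure loopback PQ, backed
 * by the vports and PQs this PF was assigned in qed_hw_get_resc().
 */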
static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_port_params *p_qm_port;
	u8 num_vports, i, vport_id, num_ports;
	u16 num_pqs, multi_cos_tcs = 1;

	memset(qm_info, 0, sizeof(*qm_info));

	num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);

	/* Sanity checking that setup requires legal number of resources */
	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
		DP_ERR(p_hwfn,
		       "Need too many Physical queues - 0x%04x when only %04x are available\n",
		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
		return -EINVAL;
	}

	/* PQs will be arranged as follows: first the per-TC PQs, then the
	 * pure-LB queue.
	 */
	qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
					num_pqs, GFP_ATOMIC);
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
					   num_vports, GFP_ATOMIC);
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
					  MAX_NUM_PORTS, GFP_ATOMIC);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);

	/* First init per-TC PQs */
	for (i = 0; i < multi_cos_tcs; i++) {
		struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];

		params->vport_id = vport_id;
		params->tc_id = p_hwfn->hw_info.non_offload_tc;
		params->wrr_group = 1;
	}

	/* Then init pure-LB PQ */
	qm_info->pure_lb_pq = i;
	qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
	qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
	qm_info->qm_pq_params[i].wrr_group = 1;
	i++;

	qm_info->offload_pq = 0;
	qm_info->num_pqs = num_pqs;
	qm_info->num_vports = num_vports;

	/* Initialize qm port parameters */
	num_ports = p_hwfn->cdev->num_ports_in_engines;
	for (i = 0; i < num_ports; i++) {
		p_qm_port = &qm_info->qm_port_params[i];
		p_qm_port->active = 1;
		p_qm_port->num_active_phys_tcs = 4;
		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
	}

	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);

	qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);

	qm_info->pf_wfq = 0;
	qm_info->pf_rl = 0;
	qm_info->vport_rl_en = 1;

	return 0;

alloc_err:
	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
	kfree(qm_info->qm_pq_params);
	kfree(qm_info->qm_vport_params);
	kfree(qm_info->qm_port_params);

	return -ENOMEM;
}

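/* Allocate all software resources a hw-function needs before HW init:
 * queue->CID maps, the context manager with its ILT shadow, the slow-path
 * queue (SPQ), event queue (EQ), consumer queue (ConsQ), and interrupt
 * and DMAE bookkeeping.  On any failure everything allocated so far is
 * undone via qed_resc_free().
 */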
int qed_resc_alloc(struct qed_dev *cdev)
{
	struct qed_consq *p_consq;
	struct qed_eq *p_eq;
	int i, rc = 0;

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	/* Allocate Memory for the Queue->CID mapping */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		int tx_size = sizeof(struct qed_hw_cid_data) *
			      RESC_NUM(p_hwfn, QED_L2_QUEUE);
		int rx_size = sizeof(struct qed_hw_cid_data) *
			      RESC_NUM(p_hwfn, QED_L2_QUEUE);

		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
		if (!p_hwfn->p_tx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Tx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}

		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
		if (!p_hwfn->p_rx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Rx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager).
		 * Must be done prior to any further computations.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Prepare and process QM requirements */
		rc = qed_init_qm_info(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
		if (rc)
			goto alloc_err;

		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above.
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because it initializes SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		/* EQ */
		p_eq = qed_eq_alloc(p_hwfn, 256);
		if (!p_eq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_eq = p_eq;

		p_consq = qed_consq_alloc(p_hwfn);
		if (!p_consq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_consq = p_consq;

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for dmae_info structure\n");
			goto alloc_err;
		}
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats) {
		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
		rc = -ENOMEM;
		goto alloc_err;
	}

	return 0;

alloc_err:
	qed_resc_free(cdev);
	return rc;
}

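/* (Re)initialize the contents of the resources allocated above; the
 * alloc/setup split appears intended to let state be rebuilt across a
 * stop/start cycle without re-allocating memory.
 */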
void qed_resc_setup(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_setup(p_hwfn);
		qed_spq_setup(p_hwfn);
		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
		qed_consq_setup(p_hwfn, p_hwfn->p_consq);

		/* Read shadow of current MFW mailbox */
		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
		       p_hwfn->mcp_info->mfw_mb_cur,
		       p_hwfn->mcp_info->mfw_mb_length);

		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
	}
}

#define FINAL_CLEANUP_CMD_OFFSET	(0)
#define FINAL_CLEANUP_CMD		(0x1)
#define FINAL_CLEANUP_VALID_OFFSET	(6)
#define FINAL_CLEANUP_VFPF_ID_SHIFT	(7)
#define FINAL_CLEANUP_COMP		(0x2)
#define FINAL_CLEANUP_POLL_CNT		(100)
#define FINAL_CLEANUP_POLL_TIME		(10)
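/* Ask firmware, via the SDM operation-gen register, to perform a final
 * cleanup of any leftover resources of function 'id', then poll the ack
 * word in USTORM RAM until firmware signals completion or the poll budget
 * (FINAL_CLEANUP_POLL_CNT * FINAL_CLEANUP_POLL_TIME ms) runs out.
 */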
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u16 id)
{
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	int rc = -EBUSY;

	addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;

	command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
	command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
	command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
	command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;

	/* Make sure notification is not set before initiating final cleanup */
	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(
			p_hwfn,
			"Unexpected; Found final cleanup notification before initiating final cleanup\n");
		REG_WR(p_hwfn, addr, 0);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
		   id, command);

	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

	/* Poll until completion */
	while (!REG_RD(p_hwfn, addr) && count--)
		msleep(FINAL_CLEANUP_POLL_TIME);

	if (REG_RD(p_hwfn, addr))
		rc = 0;
	else
		DP_NOTICE(p_hwfn,
			  "Failed to receive FW final cleanup notification\n");

	/* Cleanup afterwards */
	REG_WR(p_hwfn, addr, 0);

	return rc;
}

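/* Compose the hw_mode bitmask the init-tool uses to select which init
 * commands apply: chip revision, ports per engine, multi-function mode
 * and ASIC (as opposed to emulation) platform.
 */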
static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
	int hw_mode = 0;

	hw_mode = (1 << MODE_BB_A0);

	switch (p_hwfn->cdev->num_ports_in_engines) {
	case 1:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
		break;
	case 2:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
		break;
	case 4:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
		break;
	default:
		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engines);
		return;
	}

	switch (p_hwfn->cdev->mf_mode) {
	case SF:
		hw_mode |= 1 << MODE_SF;
		break;
	case MF_OVLAN:
		hw_mode |= 1 << MODE_MF_SD;
		break;
	case MF_NPAR:
		hw_mode |= 1 << MODE_MF_SI;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
		hw_mode |= 1 << MODE_SF;
	}

	hw_mode |= 1 << MODE_ASIC;

	p_hwfn->hw_info.hw_mode = hw_mode;
}

/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
	int i, sb_id;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_igu_info *p_igu_info;
		struct qed_igu_block *p_block;
		struct cau_sb_entry sb_entry;

		p_igu_info = p_hwfn->hw_info.p_igu_info;

		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
		     sb_id++) {
			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
			if (!p_block->is_pf)
				continue;

			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_block->function_id,
					      0, 0);
			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
					 sb_entry);
		}
	}
}

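/* Engine-phase init, run once per engine by the first function loaded on
 * it: programs CAU runtime data and GTT windows, performs the common QM
 * runtime init, and executes the init-tool's ENGINE phase.
 */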
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc = 0;

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
	qed_gtt_init(p_hwfn);

	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	memset(&params, 0, sizeof(params));
	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.pf_rl_en = qm_info->pf_rl_en;
	params.pf_wfq_en = qm_info->pf_wfq_en;
	params.vport_rl_en = qm_info->vport_rl_en;
	params.vport_wfq_en = qm_info->vport_wfq_en;
	params.port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, &params);

	qed_cxt_hw_init_common(p_hwfn);

	/* Close gate from NIG to BRB/Storm; By default they are open, but
	 * we close them to prevent NIG from passing data to reset blocks.
	 * Should have been done in the ENGINE phase, but init-tool lacks
	 * proper port-pretend capabilities.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_unpretend(p_hwfn, p_ptt);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc != 0)
		return rc;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	/* Disable relaxed ordering in the PCI config space */
	qed_wr(p_hwfn, p_ptt, 0x20b4,
	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);

	return rc;
}

static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    int hw_mode)
{
	int rc = 0;

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
			  hw_mode);
	return rc;
}

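/* PF-phase init: programs the per-function context and IGU runtime data,
 * configures MF classification (outer-VLAN tag or MAC), disables the
 * offload-protocol searchers, runs a final cleanup against any previous
 * driver instance, and optionally enables interrupts and sends the
 * function-start ramrod.
 */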
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
			  bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	int rc = 0;

	if (p_hwfn->mcp_info) {
		struct qed_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100;
	}

	qed_cxt_hw_init_pf(p_hwfn);

	qed_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed */
	if (hw_mode & (1 << MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);
	}

	/* Enable classification by MAC if needed */
	if (hw_mode & (1 << MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* Cleanup chip from previous driver if such remains exist */
	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
	if (rc != 0)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* Pure runtime initializations - directly to the HW */
	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	if (b_hw_start) {
		/* enable interrupts */
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
		rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
		if (rc)
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
	}
	return rc;
}

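/* Enable or disable this PF as a PCI master in PGLUE_B, then poll (up to
 * one second, in 50us steps) until the HW reflects the new value.
 */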
static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u8 enable)
{
	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

	/* Change PF in PXP */
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

	/* wait until value is set - try for 1 second every 50us */
	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
		val = qed_rd(p_hwfn, p_ptt,
			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
		if (val == set_val)
			break;

		usleep_range(50, 60);
	}

	if (val != set_val) {
		DP_NOTICE(p_hwfn,
			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
		return -EAGAIN;
	}

	return 0;
}

static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_main_ptt)
{
	/* Read shadow of current MFW mailbox */
	qed_mcp_read_mb(p_hwfn, p_main_ptt);
	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
	       p_hwfn->mcp_info->mfw_mb_cur,
	       p_hwfn->mcp_info->mfw_mb_length);
}

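/* Top-level HW init.  For each hw-function: enable it as a PCI master,
 * compute hw_mode, send LOAD_REQ to the management FW, and run the init
 * phases the returned load code entitles us to - ENGINE falls through to
 * PORT, which falls through to FUNCTION - before acking with LOAD_DONE.
 */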
int qed_hw_init(struct qed_dev *cdev,
		bool b_hw_start,
		enum qed_int_mode int_mode,
		bool allow_npar_tx_switch,
		const u8 *bin_fw_data)
{
	u32 load_code, param;
	int rc, mfw_rc, i;

	rc = qed_init_fw_data(cdev, bin_fw_data);
	if (rc != 0)
		return rc;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* Enable DMAE in PXP */
		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

		qed_calc_hw_mode(p_hwfn);

		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
				      &load_code);
		if (rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
			return rc;
		}

		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
			   rc, load_code);

		p_hwfn->first_on_engine = (load_code ==
					   FW_MSG_CODE_DRV_LOAD_ENGINE);

		switch (load_code) {
		case FW_MSG_CODE_DRV_LOAD_ENGINE:
			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
						p_hwfn->hw_info.hw_mode);
			if (rc)
				break;
			/* Fall through */
		case FW_MSG_CODE_DRV_LOAD_PORT:
			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
					      p_hwfn->hw_info.hw_mode);
			if (rc)
				break;

			/* Fall through */
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
					    p_hwfn->hw_info.hw_mode,
					    b_hw_start, int_mode,
					    allow_npar_tx_switch);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		if (rc)
			DP_NOTICE(p_hwfn,
				  "init phase failed for loadcode 0x%x (rc %d)\n",
				  load_code, rc);

		/* ACK mfw regardless of success or failure of initialization */
		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				     DRV_MSG_CODE_LOAD_DONE,
				     0, &load_code, &param);
		if (rc)
			return rc;
		if (mfw_rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
			return mfw_rc;
		}

		p_hwfn->hw_init_done = true;
	}

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
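/* Graceful teardown: per hw-function, send the PF-stop ramrod, gate
 * traffic in the NIG and parser, wait for the timer block to finish its
 * linear scans, and silence interrupts; finally, revoke PCI master on
 * the first hwfn once all functions have quiesced.
 */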
int qed_hw_stop(struct qed_dev *cdev)
{
	int rc = 0, t_rc;
	int i, j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

		/* mark the hw as uninitialized... */
		p_hwfn->hw_init_done = false;

		rc = qed_sp_pf_stop(p_hwfn);
		if (rc)
			return rc;

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
		for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
			if ((!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
			    (!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_TASK)))
				break;

			usleep_range(1000, 2000);
		}
		if (i == QED_HW_STOP_RETRY_LIMIT)
			DP_NOTICE(p_hwfn,
				  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_CONN),
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_TASK));

		/* Disable Attention Generation */
		qed_int_igu_disable_int(p_hwfn, p_ptt);

		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}

	/* Disable DMAE in PXP - in CMT, this should only be done for
	 * first hw-function, and only after all transactions have
	 * stopped for all active hw-functions.
	 */
	t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
				   cdev->hwfns[0].p_main_ptt,
				   false);
	if (t_rc != 0)
		rc = t_rc;

	return rc;
}

static int qed_reg_assert(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt, u32 reg,
			  bool expected)
{
	u32 assert_val = qed_rd(hwfn, ptt, reg);

	if (assert_val != expected) {
		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
			  reg, expected);
		return -EINVAL;
	}

	return 0;
}

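/* Reset path used when unloading: verify the QM usage counters have
 * drained, disable the PF in the doorbell/QM/CFC blocks, and run the
 * UNLOAD_REQ / UNLOAD_DONE handshake with the management FW.
 */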
int qed_hw_reset(struct qed_dev *cdev)
{
	int rc = 0;
	u32 unload_resp, unload_param;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

		/* Check for incorrect states */
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_TX, 0);
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_OTHER, 0);

		/* Disable PF in HW blocks */
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       TCFC_REG_STRONG_ENABLE_PF, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       CCFC_REG_STRONG_ENABLE_PF, 0);

		/* Send unload command to MCP */
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_REQ,
				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
				 &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
		}

		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_DONE,
				 0, &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
			return rc;
		}
	}

	return rc;
}

/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
	qed_ptt_pool_free(p_hwfn);
	kfree(p_hwfn->hw_info.p_igu_info);
}

/* Setup bar access */
static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate PTT pool */
	rc = qed_ptt_pool_alloc(p_hwfn);
	if (rc)
		return rc;

	/* Allocate the main PTT */
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

	/* clear indirect access */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

	/* Clean Previous errors if such exist */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
	       1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	return 0;
}

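/* Derive this function's identity from the PXP "ME" registers: the opaque
 * FID used in ramrods and doorbells, plus the absolute/relative PF ids
 * and port id decoded from the concrete FID.
 */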
static void get_function_id(struct qed_hwfn *p_hwfn)
{
	/* ME Register */
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);

	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				      PXP_CONCRETE_FID_PFID);
	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				    PXP_CONCRETE_FID_PORT);
}

static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
	u32 *feat_num = p_hwfn->hw_info.feat_num;
	int num_features = 1;

	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
					num_features,
					RESC_NUM(p_hwfn, QED_L2_QUEUE));
	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
		   num_features);
}

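/* Statically partition the chip's resources (status blocks, L2 queues,
 * vports, PQs, filters, ILT lines, ...) evenly between the functions that
 * may exist on the device - all PFs in MF mode, otherwise one per port;
 * each PF's slice simply starts at resc_num * rel_pf_id.
 */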
static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
	u32 *resc_start = p_hwfn->hw_info.resc_start;
	u32 *resc_num = p_hwfn->hw_info.resc_num;
	int num_funcs, i;

	num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
				  : p_hwfn->cdev->num_ports_in_engines;

	resc_num[QED_SB] = min_t(u32,
				 (MAX_SB_PER_PATH_BB / num_funcs),
				 qed_int_get_num_sbs(p_hwfn, NULL));
	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
	resc_num[QED_RL] = 8;
	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
			     num_funcs;
	resc_num[QED_ILT] = 950;

	for (i = 0; i < QED_MAX_RESC; i++)
		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;

	qed_hw_set_feat(p_hwfn);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "The numbers for each resource are:\n"
		   "SB = %d start = %d\n"
		   "L2_QUEUE = %d start = %d\n"
		   "VPORT = %d start = %d\n"
		   "PQ = %d start = %d\n"
		   "RL = %d start = %d\n"
		   "MAC = %d start = %d\n"
		   "VLAN = %d start = %d\n"
		   "ILT = %d start = %d\n",
		   p_hwfn->hw_info.resc_num[QED_SB],
		   p_hwfn->hw_info.resc_start[QED_SB],
		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_num[QED_VPORT],
		   p_hwfn->hw_info.resc_start[QED_VPORT],
		   p_hwfn->hw_info.resc_num[QED_PQ],
		   p_hwfn->hw_info.resc_start[QED_PQ],
		   p_hwfn->hw_info.resc_num[QED_RL],
		   p_hwfn->hw_info.resc_start[QED_RL],
		   p_hwfn->hw_info.resc_num[QED_MAC],
		   p_hwfn->hw_info.resc_start[QED_MAC],
		   p_hwfn->hw_info.resc_num[QED_VLAN],
		   p_hwfn->hw_info.resc_start[QED_VLAN],
		   p_hwfn->hw_info.resc_num[QED_ILT],
		   p_hwfn->hw_info.resc_start[QED_ILT]);
}

static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt)
{
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, nvm_cfg_addr;
	u32 val;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1 (Notice this is just an offset, and not offsize (TBD)) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	/* Read Vendor Id / Device Id */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, pci_id);
	p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
				    NVM_CFG1_GLOB_VENDOR_ID_MASK;
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
	       offsetof(struct nvm_cfg1_func, device_id);
	val = qed_rd(p_hwfn, p_ptt, addr);

	if (IS_MF(p_hwfn)) {
		p_hwfn->hw_info.device_id =
			(val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
			NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
	} else {
		p_hwfn->hw_info.device_id =
			(val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
			NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
	}

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
		p_hwfn->cdev->mf_mode = SF;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}

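/* Gather everything hw_info needs: port count from the network-interface
 * glue, NVM/shmem configuration, the IGU CAM, the MAC address (or a
 * random one if the MFW is unavailable), outer-VLAN, personality and the
 * resource partition.
 */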
static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		enum qed_pci_personality personality)
{
	u32 port_mode;
	int rc;

	/* Read the port mode */
	port_mode = qed_rd(p_hwfn, p_ptt,
			   CNIG_REG_NW_PORT_MODE_BB_B0);

	if (port_mode < 3) {
		p_hwfn->cdev->num_ports_in_engines = 1;
	} else if (port_mode <= 5) {
		p_hwfn->cdev->num_ports_in_engines = 2;
	} else {
		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
			  port_mode);

		/* Default num_ports_in_engines to something */
		p_hwfn->cdev->num_ports_in_engines = 1;
	}

	qed_hw_get_nvm_info(p_hwfn, p_ptt);

	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (qed_mcp_is_init(p_hwfn))
		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
				p_hwfn->mcp_info->func_info.mac);
	else
		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

	if (qed_mcp_is_init(p_hwfn)) {
		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
			p_hwfn->hw_info.ovlan =
				p_hwfn->mcp_info->func_info.ovlan;

		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}

	if (qed_mcp_is_init(p_hwfn)) {
		enum qed_pci_personality protocol;

		protocol = p_hwfn->mcp_info->func_info.protocol;
		p_hwfn->hw_info.personality = protocol;
	}

	qed_hw_get_resc(p_hwfn);

	return rc;
}

static void qed_get_dev_info(struct qed_dev *cdev)
{
	u32 tmp;

	cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
				     MISCS_REG_CHIP_NUM);
	cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
				     MISCS_REG_CHIP_REV);
	MASK_FIELD(CHIP_REV, cdev->chip_rev);

	/* Learn number of HW-functions */
	tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
		     MISCS_REG_CMT_ENABLED_FOR_PAIR);

	if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
		cdev->num_hwfns = 2;
	} else {
		cdev->num_hwfns = 1;
	}

	cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
				    MISCS_REG_CHIP_TEST_REG) >> 4;
	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
	cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
				       MISCS_REG_CHIP_METAL);
	MASK_FIELD(CHIP_METAL, cdev->chip_metal);

	DP_INFO(cdev->hwfns,
		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
		cdev->chip_num, cdev->chip_rev,
		cdev->chip_bond_id, cdev->chip_metal);
}

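/* Prepare a single hw-function: map its share of the register and
 * doorbell BARs, sanity-check that the chip answers reads, learn the
 * function id, set up PTT windows, initialize the MCP mailbox, read the
 * HW/shmem configuration and allocate the init-ops runtime array.
 */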
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
				 void __iomem *p_regview,
				 void __iomem *p_doorbells,
				 enum qed_pci_personality personality)
{
	int rc = 0;

	/* Split PCI bars evenly between hwfns */
	p_hwfn->regview = p_regview;
	p_hwfn->doorbells = p_doorbells;

	/* Validate that chip access is feasible */
	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
		DP_ERR(p_hwfn,
		       "Reading the ME register returns all Fs; Preventing further chip access\n");
		return -EINVAL;
	}

	get_function_id(p_hwfn);

	rc = qed_hw_hwfn_prepare(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
		goto err0;
	}

	/* First hwfn learns basic information, e.g., number of hwfns */
	if (!p_hwfn->my_id)
		qed_get_dev_info(p_hwfn->cdev);

	/* Initialize MCP structure */
	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
		goto err1;
	}

	/* Read the device configuration information from the HW and SHMEM */
	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
		goto err2;
	}

	/* Allocate the init RT array and initialize the init-ops engine */
	rc = qed_init_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
		goto err2;
	}

	return rc;
err2:
	qed_mcp_free(p_hwfn);
err1:
	qed_hw_hwfn_free(p_hwfn);
err0:
	return rc;
}

static u32 qed_hw_bar_size(struct qed_dev *cdev,
			   u8 bar_id)
{
	u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);

	return size / cdev->num_hwfns;
}

int qed_hw_prepare(struct qed_dev *cdev,
		   int personality)
{
	int rc, i;

	/* Store the precompiled init data ptrs */
	qed_init_iro_array(cdev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
				   cdev->doorbells, personality);
	if (rc)
		return rc;

	personality = cdev->hwfns[0].hw_info.personality;

	/* Initialize the rest of the hwfns */
	for (i = 1; i < cdev->num_hwfns; i++) {
		void __iomem *p_regview, *p_doorbell;

		p_regview = cdev->regview +
			    i * qed_hw_bar_size(cdev, 0);
		p_doorbell = cdev->doorbells +
			     i * qed_hw_bar_size(cdev, 1);
		rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
					   p_doorbell, personality);
		if (rc) {
			/* Cleanup previously initialized hwfns */
			while (--i >= 0) {
				qed_init_free(&cdev->hwfns[i]);
				qed_mcp_free(&cdev->hwfns[i]);
				qed_hw_hwfn_free(&cdev->hwfns[i]);
			}
			return rc;
		}
	}

	return 0;
}

void qed_hw_remove(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_init_free(p_hwfn);
		qed_hw_hwfn_free(p_hwfn);
		qed_mcp_free(p_hwfn);
	}
}

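/* Allocate DMA-coherent memory for a qed chain and initialize it.  In
 * SINGLE mode one page is used; otherwise enough pages are allocated to
 * hold num_elems elements, and in PBL mode an additional page-base-list
 * table is allocated so firmware can follow page boundaries.
 *
 * A typical call (queue name and element count are illustrative only)
 * might look like:
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_PRODUCE,
 *			     QED_CHAIN_MODE_PBL, 256,
 *			     sizeof(union eth_rx_cqe), &rxq->rx_comp_ring);
 */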
int qed_chain_alloc(struct qed_dev *cdev,
		    enum qed_chain_use_mode intended_use,
		    enum qed_chain_mode mode,
		    u16 num_elems,
		    size_t elem_size,
		    struct qed_chain *p_chain)
{
	dma_addr_t p_pbl_phys = 0;
	void *p_pbl_virt = NULL;
	dma_addr_t p_phys = 0;
	void *p_virt = NULL;
	u16 page_cnt = 0;
	size_t size;

	if (mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	size = page_cnt * QED_CHAIN_PAGE_SIZE;
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    size, &p_phys, GFP_KERNEL);
	if (!p_virt) {
		DP_NOTICE(cdev, "Failed to allocate chain mem\n");
		goto nomem;
	}

	if (mode == QED_CHAIN_MODE_PBL) {
		size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
						size, &p_pbl_phys,
						GFP_KERNEL);
		if (!p_pbl_virt) {
			DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
			goto nomem;
		}

		qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
				   (u8)elem_size, intended_use,
				   p_pbl_phys, p_pbl_virt);
	} else {
		qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
			       (u8)elem_size, intended_use, mode);
	}

	return 0;

nomem:
	dma_free_coherent(&cdev->pdev->dev,
			  page_cnt * QED_CHAIN_PAGE_SIZE,
			  p_virt, p_phys);
	dma_free_coherent(&cdev->pdev->dev,
			  page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
			  p_pbl_virt, p_pbl_phys);

	return -ENOMEM;
}

void qed_chain_free(struct qed_dev *cdev,
		    struct qed_chain *p_chain)
{
	size_t size;

	if (!p_chain->p_virt_addr)
		return;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
		dma_free_coherent(&cdev->pdev->dev, size,
				  p_chain->pbl.p_virt_table,
				  p_chain->pbl.p_phys_table);
	}

	size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
	dma_free_coherent(&cdev->pdev->dev, size,
			  p_chain->p_virt_addr,
			  p_chain->p_phys_addr);
}