/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"

#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"

#define ECORE_INIT_MAX_POLL_COUNT	100
#define ECORE_INIT_POLL_PERIOD_US	500

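/* Point the device at the firmware-generated IRO array, which other ecore
 * modules use to locate firmware data structures in the storms' RAM.
 */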
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
        p_dev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
        int i;

        for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
                p_hwfn->rt_data.b_valid[i] = false;
}

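/* Record a single runtime (RT) register value and mark its slot valid.
 * The value is flushed to hardware later, when the init tool processes
 * an INIT_SRC_RUNTIME write command covering this offset.
 */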
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
        p_hwfn->rt_data.init_val[rt_offset] = val;
        p_hwfn->rt_data.b_valid[rt_offset] = true;
}

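/* Store an aggregate (multi-dword) runtime value starting at rt_offset.
 * @size is in bytes and is assumed to be a multiple of sizeof(u32),
 * matching the RT array's dword granularity.
 */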
void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
                             u32 rt_offset, u32 *p_val, osal_size_t size)
{
        osal_size_t i;

        for (i = 0; i < size / sizeof(u32); i++) {
                p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
                p_hwfn->rt_data.b_valid[rt_offset + i] = true;
        }
}

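/* Flush the staged runtime values in [rt_offset, rt_offset + size) to the
 * hardware block at @addr. Only slots marked valid are written; when
 * @b_must_dmae is set (wide-bus registers), contiguous runs of valid slots
 * are coalesced into single DMAE transactions, otherwise plain register
 * writes are used.
 */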
static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 addr,
                                          u16 rt_offset,
                                          u16 size, bool b_must_dmae)
{
        u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
        bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
        u16 i, segment;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        /* Since not all RT entries are initialized, go over the RT and
         * for each segment of initialized values use DMA.
         */
        for (i = 0; i < size; i++) {
                if (!p_valid[i])
                        continue;

                /* In case there isn't any wide-bus configuration here,
                 * simply write the data instead of using dmae.
                 */
                if (!b_must_dmae) {
                        ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
                        continue;
                }

                /* Start of a new segment */
                for (segment = 1; i + segment < size; segment++)
                        if (!p_valid[i + segment])
                                break;

                rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
                                         (osal_uintptr_t)(p_init_val + i),
                                         addr + (i << 2), segment, 0);
                if (rc != ECORE_SUCCESS)
                        return rc;

                /* Jump over the entire segment, including invalid entry */
                i += segment;
        }

        return rc;
}

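/* Allocate the per-hwfn runtime shadow arrays (a validity flag and an init
 * value per RT slot). VFs have no runtime array of their own, so the
 * allocation is skipped for them.
 */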
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_rt_data *rt_data = &p_hwfn->rt_data;

        if (IS_VF(p_hwfn->p_dev))
                return ECORE_SUCCESS;

        rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
                                       sizeof(bool) * RUNTIME_ARRAY_SIZE);
        if (!rt_data->b_valid)
                return ECORE_NOMEM;

        rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
                                        sizeof(u32) * RUNTIME_ARRAY_SIZE);
        if (!rt_data->init_val) {
                OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
                return ECORE_NOMEM;
        }

        return ECORE_SUCCESS;
}

void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
}

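/* Write @size dwords taken from @p_buf + @dmae_data_offset to the register
 * block at @addr. Short transfers (fewer than 16 dwords) that do not
 * require wide-bus access fall back to individual register writes, since a
 * DMAE transaction is not worth its setup cost there.
 */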
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
                                                  struct ecore_ptt *p_ptt,
                                                  u32 addr,
                                                  u32 dmae_data_offset,
                                                  u32 size, const u32 *p_buf,
                                                  bool b_must_dmae,
                                                  bool b_can_dmae)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;

        /* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
        if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
            !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
        if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
                const u32 *data = p_buf + dmae_data_offset;
                u32 i;

                for (i = 0; i < size; i++)
                        ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
        } else {
                rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
                                         (osal_uintptr_t)(p_buf +
                                                          dmae_data_offset),
                                         addr, size, 0);
        }

        return rc;
}

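/* Zero-fill @fill_count dwords at @addr with a single DMAE transaction.
 * The replicate-source flag keeps the DMAE engine reading the same host
 * buffer rather than advancing through it. Note that the @fill argument
 * is currently ignored; only a fill value of zero is supported.
 */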
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
                                                 struct ecore_ptt *p_ptt,
                                                 u32 addr, u32 fill,
                                                 u32 fill_count)
{
        static u32 zero_buffer[DMAE_MAX_RW_SIZE];

        OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

        return ecore_dmae_host2grc(p_hwfn, p_ptt,
                                   (osal_uintptr_t)&zero_buffer[0],
                                   addr, fill_count,
                                   ECORE_DMAE_FLAG_RW_REPL_SRC);
}

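/* PIO fallback for the fill operation: write @fill to @fill_count
 * consecutive dword registers starting at @addr, one register at a time.
 */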
static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt,
                            u32 addr, u32 fill, u32 fill_count)
{
        u32 i;

        for (i = 0; i < fill_count; i++, addr += sizeof(u32))
                ecore_wr(p_hwfn, p_ptt, addr, fill);
}

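/* Handle an INIT_SRC_ARRAY write command. The array header selects one of
 * three encodings: a compressed blob that is inflated into
 * p_hwfn->unzip_buf (INIT_ARR_ZIPPED, only when built with
 * CONFIG_ECORE_ZIPPED_FW), a pattern repeated a given number of times
 * (INIT_ARR_PATTERN), or a plain dword array (INIT_ARR_STANDARD).
 */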
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
                                                 struct ecore_ptt *p_ptt,
                                                 struct init_write_op *cmd,
                                                 bool b_must_dmae,
                                                 bool b_can_dmae)
{
        u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
        u32 data = OSAL_LE32_TO_CPU(cmd->data);
        u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
        u32 offset, output_len, input_len, max_size;
#endif
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        union init_array_hdr *hdr;
        const u32 *array_data;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        u32 size;

        array_data = p_dev->fw_data->arr_data;

        hdr = (union init_array_hdr *)
                (uintptr_t)(array_data + dmae_array_offset);
        data = OSAL_LE32_TO_CPU(hdr->raw.data);
        switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
        case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
                offset = dmae_array_offset + 1;
                input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
                max_size = MAX_ZIPPED_SIZE * 4;
                OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

                output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
                                             (u8 *)(uintptr_t)&array_data[offset],
                                             max_size,
                                             (u8 *)p_hwfn->unzip_buf);
                if (output_len) {
                        rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
                                                   output_len,
                                                   p_hwfn->unzip_buf,
                                                   b_must_dmae, b_can_dmae);
                } else {
                        DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
                        rc = ECORE_INVAL;
                }
#else
                DP_NOTICE(p_hwfn, true,
                          "Using zipped firmware without config enabled\n");
                rc = ECORE_INVAL;
#endif
                break;
        case INIT_ARR_PATTERN:
        {
                u32 repeats = GET_FIELD(data,
                                        INIT_ARRAY_PATTERN_HDR_REPETITIONS);
                u32 i;

                size = GET_FIELD(data,
                                 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

                for (i = 0; i < repeats; i++, addr += size << 2) {
                        rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
                                                   dmae_array_offset + 1,
                                                   size, array_data,
                                                   b_must_dmae, b_can_dmae);
                        if (rc)
                                break;
                }
                break;
        }
        case INIT_ARR_STANDARD:
                size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
                rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
                                           dmae_array_offset + 1,
                                           size, array_data,
                                           b_must_dmae, b_can_dmae);
                break;
        }

        return rc;
}

/* init_ops write command */
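/* Dispatch a single write op by source type: inline immediate, zero-fill,
 * array, or runtime-array. Wide-bus destinations must go through DMAE, so
 * the op is rejected when the caller cannot use DMAE in this phase.
 */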
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
                                              struct ecore_ptt *p_ptt,
                                              struct init_write_op *p_cmd,
                                              bool b_can_dmae)
{
        u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
        bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
        u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        /* Sanitize */
        if (b_must_dmae && !b_can_dmae) {
                DP_NOTICE(p_hwfn, true,
                          "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
                          addr);
                return ECORE_INVAL;
        }

        switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
        case INIT_SRC_INLINE:
                data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
                ecore_wr(p_hwfn, p_ptt, addr, data);
                break;
        case INIT_SRC_ZEROS:
                data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
                if (b_must_dmae || (b_can_dmae && (data >= 64)))
                        rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
                else
                        ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
                break;
        case INIT_SRC_ARRAY:
                rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
                                          b_must_dmae, b_can_dmae);
                break;
        case INIT_SRC_RUNTIME:
                rc = ecore_init_rt(p_hwfn, p_ptt, addr,
                                   OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
                                   OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
                                   b_must_dmae);
                break;
        }

        return rc;
}

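/* Comparison callbacks for poll-type read commands: EQ succeeds when the
 * read value equals the expected value, AND when it contains all expected
 * bits, and OR as soon as the OR of the read and expected values is
 * non-zero.
 */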
static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
        return (val == expected_val);
}

static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
        return (val & expected_val) == expected_val;
}

static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
        return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
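/* Execute a read op. For INIT_POLL_NONE the register is read once and the
 * value discarded; otherwise the register is re-read every
 * ECORE_INIT_POLL_PERIOD_US microseconds, up to ECORE_INIT_MAX_POLL_COUNT
 * times, until the selected comparison against the expected value holds.
 */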
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt, struct init_read_op *cmd)
{
        bool (*comp_check)(u32 val, u32 expected_val);
        u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
        u32 data, addr, poll;
        int i;

        data = OSAL_LE32_TO_CPU(cmd->op_data);
        addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
        poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay *= 100;
#endif

        val = ecore_rd(p_hwfn, p_ptt, addr);

        if (poll == INIT_POLL_NONE)
                return;

        switch (poll) {
        case INIT_POLL_EQ:
                comp_check = comp_eq;
                break;
        case INIT_POLL_OR:
                comp_check = comp_or;
                break;
        case INIT_POLL_AND:
                comp_check = comp_and;
                break;
        default:
                DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
                       cmd->op_data);
                return;
        }

        data = OSAL_LE32_TO_CPU(cmd->expected_val);
        for (i = 0;
             i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
                OSAL_UDELAY(delay);
                val = ecore_rd(p_hwfn, p_ptt, addr);
        }

        if (i == ECORE_INIT_MAX_POLL_COUNT)
                DP_ERR(p_hwfn,
                       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
                       addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
                       OSAL_LE32_TO_CPU(cmd->op_data));
}

/* init_ops callbacks entry point */
static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              struct init_callback_op *p_cmd)
{
        DP_NOTICE(p_hwfn, true,
                  "Currently init values have no need of callbacks\n");
}

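/* Recursively evaluate the firmware's mode-tree expression starting at
 * *p_offset against the @modes bitmap. Internal nodes encode NOT/OR/AND
 * operators; any other byte is a leaf whose value, biased by
 * MAX_INIT_MODE_OPS, is the mode bit to test. *p_offset is advanced past
 * the consumed sub-expression.
 */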
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
                                    u16 *p_offset, int modes)
{
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        const u8 *modes_tree_buf;
        u8 arg1, arg2, tree_val;

        modes_tree_buf = p_dev->fw_data->modes_tree_buf;
        tree_val = modes_tree_buf[(*p_offset)++];
        switch (tree_val) {
        case INIT_MODE_OP_NOT:
                return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
        case INIT_MODE_OP_OR:
                arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
                arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
                return arg1 | arg2;
        case INIT_MODE_OP_AND:
                arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
                arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
                return arg1 & arg2;
        default:
                tree_val -= MAX_INIT_MODE_OPS;
                return (modes & (1 << tree_val)) ? 1 : 0;
        }
}

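/* Evaluate an IF_MODE op. Returns 0 when the mode expression matches
 * (execution continues with the next op), or the encoded skip count when
 * it does not, so the caller jumps over the guarded ops.
 */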
static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
                               struct init_if_mode_op *p_cmd, int modes)
{
        u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);

        if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
                return 0;
        else
                return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
                                 INIT_IF_MODE_OP_CMD_OFFSET);
}

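/* Evaluate an IF_PHASE op: the guarded ops run only when the phase matches
 * and the phase id either matches or the op accepts ANY_PHASE_ID. As with
 * IF_MODE, a non-zero return value is the number of ops to skip.
 */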
static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
                                struct init_if_phase_op *p_cmd,
                                u32 phase, u32 phase_id)
{
        u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);

        if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
              (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
               GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
                return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
                                 INIT_IF_PHASE_OP_CMD_OFFSET);
        else
                return 0;
}

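/* Main entry point of the init tool: walk the firmware's init_ops list and
 * execute each op (write, read/poll, conditional skip, delay or callback)
 * that applies to the given phase/phase_id/modes combination. DMAE usage
 * is gated by the IF_PHASE op's DMAE_ENABLE flag. A typical caller invokes
 * this once per init phase, e.g. (illustrative sketch; the exact phase
 * constants such as PHASE_ENGINE come from the HSI definitions):
 *
 *     rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, modes);
 */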
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt,
                                    int phase, int phase_id, int modes)
{
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        u32 cmd_num, num_init_ops;
        union init_op *init_ops;
        bool b_dmae = false;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        num_init_ops = p_dev->fw_data->init_ops_size;
        init_ops = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
        p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
                                        MAX_ZIPPED_SIZE * 4);
        if (!p_hwfn->unzip_buf) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
                return ECORE_NOMEM;
        }
#endif

        for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
                union init_op *cmd = &init_ops[cmd_num];
                u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

                switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
                case INIT_OP_WRITE:
                        rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
                                               b_dmae);
                        break;

                case INIT_OP_READ:
                        ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
                        break;

                case INIT_OP_IF_MODE:
                        cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
                                                       modes);
                        break;
                case INIT_OP_IF_PHASE:
                        cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
                                                        phase, phase_id);
                        b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
                        break;
                case INIT_OP_DELAY:
                        /* ecore_init_run is always invoked from
                         * sleep-able context
                         */
                        OSAL_UDELAY(cmd->delay.delay);
                        break;

                case INIT_OP_CALLBACK:
                        ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
                        break;
                }

                if (rc)
                        break;
        }
#ifdef CONFIG_ECORE_ZIPPED_FW
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
#endif
        return rc;
}

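/* Program the fixed GTT windows (the global PXP windows) so the GRC
 * addresses behind them are reachable. On non-ASIC platforms this first
 * kicks off and polls the PGLUE PTT/GTT init, which the MFW normally
 * performs on real hardware.
 */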
void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
{
        u32 gtt_base;
        u32 i;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                /* This is done by MFW on ASIC; regardless, this should only
                 * be done once per chip [i.e., common]. Implementation is
                 * not too bright, but it should work on the simple FPGA/EMUL
                 * scenarios.
                 */
                static bool initialized;
                int poll_cnt = 500;
                u32 val;

                /* initialize PTT/GTT (poll for completion) */
                if (!initialized) {
                        ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
                                 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
                        initialized = true;
                }

                do {
                        /* ptt might be overridden by HW until this is done */
                        OSAL_UDELAY(10);
                        ecore_ptt_invalidate(p_hwfn);
                        val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
                                       PGLUE_B_REG_INIT_DONE_PTT_GTT);
                } while ((val != 1) && --poll_cnt);

                if (!poll_cnt)
                        DP_ERR(p_hwfn,
                               "PGLUE_B_REG_INIT_DONE didn't complete\n");
        }
#endif

        /* Set the global windows */
        gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

        for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
                if (pxp_global_win[i])
                        REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
                               pxp_global_win[i]);
}

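/* Parse the firmware file's binary buffer table and cache pointers to the
 * sections the init tool needs: version info, init ops, value arrays and
 * the modes tree. With CONFIG_ECORE_BINARY_FW unset, the statically linked
 * firmware arrays are used instead.
 */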
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
                                        const u8 *data)
{
        struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
        struct bin_buffer_hdr *buf_hdr;
        u32 offset, len;

        if (!data) {
                DP_NOTICE(p_dev, true, "Invalid fw data\n");
                return ECORE_INVAL;
        }

        buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;

        offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
        fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));

        offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
        fw->init_ops = (union init_op *)((uintptr_t)(data + offset));

        offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
        fw->arr_data = (u32 *)((uintptr_t)(data + offset));

        offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
        fw->modes_tree_buf = (u8 *)((uintptr_t)(data + offset));
        len = buf_hdr[BIN_BUF_INIT_CMD].length;
        fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
        fw->init_ops = (union init_op *)init_ops;
        fw->arr_data = (u32 *)init_val;
        fw->modes_tree_buf = (u8 *)modes_tree_buf;
        fw->init_ops_size = init_ops_size;
#endif

        return ECORE_SUCCESS;
}