/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2007-2013 Broadcom Corporation.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#ifndef ECORE_INIT_OPS_H
#define ECORE_INIT_OPS_H
static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t *zbuf, int len);
static void ecore_write_dmae_phys_len(struct bnx2x_softc *sc,
				      ecore_dma_addr_t phys_addr, uint32_t addr,
				      uint32_t len);
/* Slow path: write an array of @len DWORDs to consecutive device registers
 * starting at @addr, one register write at a time.
 */
static void ecore_init_str_wr(struct bnx2x_softc *sc, uint32_t addr,
			      const uint32_t *data, uint32_t len)
{
	uint32_t i;

	for (i = 0; i < len; i++)
		REG_WR(sc, addr + i * 4, data[i]);
}
/* Flush the staging (GUNZIP) buffer to device memory at @addr: use DMAE
 * when it is ready, otherwise fall back to register-by-register writes.
 * @len is in DWORDs.
 */
static void ecore_write_big_buf(struct bnx2x_softc *sc, uint32_t addr,
				uint32_t len)
{
	if (DMAE_READY(sc))
		ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);
	else
		ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
}
39 | static void ecore_init_fill(struct bnx2x_softc *sc, uint32_t addr, int fill, | |
40 | uint32_t len) | |
41 | { | |
42 | uint32_t buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4)); | |
43 | uint32_t buf_len32 = buf_len/4; | |
44 | uint32_t i; | |
45 | ||
46 | ECORE_MEMSET(GUNZIP_BUF(sc), (uint8_t)fill, buf_len); | |
47 | ||
48 | for (i = 0; i < len; i += buf_len32) { | |
49 | uint32_t cur_len = min(buf_len32, len - i); | |
50 | ||
51 | ecore_write_big_buf(sc, addr + i*4, cur_len); | |
52 | } | |
53 | } | |
54 | ||
/* Wide-bus variant of ecore_write_big_buf().  In this build it is
 * identical to ecore_write_big_buf(); it is kept as a separate entry
 * point because upstream variants differ here - do not merge.
 */
static void ecore_write_big_buf_wb(struct bnx2x_softc *sc, uint32_t addr,
				   uint32_t len)
{
	if (DMAE_READY(sc))
		ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);
	else
		ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
}
63 | static void ecore_init_wr_64(struct bnx2x_softc *sc, uint32_t addr, | |
64 | const uint32_t *data, uint32_t len64) | |
65 | { | |
66 | uint32_t buf_len32 = FW_BUF_SIZE/4; | |
67 | uint32_t len = len64*2; | |
68 | uint64_t data64 = 0; | |
69 | uint32_t i; | |
70 | ||
71 | /* 64 bit value is in a blob: first low DWORD, then high DWORD */ | |
72 | data64 = HILO_U64((*(data + 1)), (*data)); | |
73 | ||
74 | len64 = min((uint32_t)(FW_BUF_SIZE/8), len64); | |
75 | for (i = 0; i < len64; i++) { | |
76 | uint64_t *pdata = ((uint64_t *)(GUNZIP_BUF(sc))) + i; | |
77 | ||
78 | *pdata = data64; | |
79 | } | |
80 | ||
81 | for (i = 0; i < len; i += buf_len32) { | |
82 | uint32_t cur_len = min(buf_len32, len - i); | |
83 | ||
84 | ecore_write_big_buf_wb(sc, addr + i*4, cur_len); | |
85 | } | |
86 | } | |
87 | ||
/*********************************************************
   There are different blobs for each PRAM section.
   In addition, each blob write operation is divided into a few operations
   in order to decrease the amount of phys. contiguous buffer needed.
   Thus, when we select a blob the address may be with some offset
   from the beginning of PRAM section.
   The same holds for the INT_TABLE sections.
**********************************************************/
/* True when @addr falls inside the 0x400-byte INT_TABLE window at @base */
#define IF_IS_INT_TABLE_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))

/* True when @addr falls inside the 0x40000-byte PRAM window at @base */
#define IF_IS_PRAM_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
102 | static const uint8_t *ecore_sel_blob(struct bnx2x_softc *sc, uint32_t addr, | |
103 | const uint8_t *data) | |
104 | { | |
105 | IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr) | |
106 | data = INIT_TSEM_INT_TABLE_DATA(sc); | |
107 | else | |
108 | IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr) | |
109 | data = INIT_CSEM_INT_TABLE_DATA(sc); | |
110 | else | |
111 | IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr) | |
112 | data = INIT_USEM_INT_TABLE_DATA(sc); | |
113 | else | |
114 | IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr) | |
115 | data = INIT_XSEM_INT_TABLE_DATA(sc); | |
116 | else | |
117 | IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr) | |
118 | data = INIT_TSEM_PRAM_DATA(sc); | |
119 | else | |
120 | IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr) | |
121 | data = INIT_CSEM_PRAM_DATA(sc); | |
122 | else | |
123 | IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr) | |
124 | data = INIT_USEM_PRAM_DATA(sc); | |
125 | else | |
126 | IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr) | |
127 | data = INIT_XSEM_PRAM_DATA(sc); | |
128 | ||
129 | return data; | |
130 | } | |
131 | ||
/* Write @len DWORDs from host memory @data to device memory at @addr,
 * via DMAE when available, otherwise register-by-register.
 */
static void ecore_init_wr_wb(struct bnx2x_softc *sc, uint32_t addr,
			     const uint32_t *data, uint32_t len)
{
	if (DMAE_READY(sc))
		VIRT_WR_DMAE_LEN(sc, data, addr, len, 0);
	else
		ecore_init_str_wr(sc, addr, data, len);
}
/* Write a 64-bit value (given as low/high DWORD pair) to the wide-bus
 * register at @reg using a 2-DWORD DMAE transaction.
 */
static void ecore_wr_64(struct bnx2x_softc *sc, uint32_t reg, uint32_t val_lo,
			uint32_t val_hi)
{
	uint32_t wb_write[2];

	wb_write[0] = val_lo;
	wb_write[1] = val_hi;
	REG_WR_DMAE_LEN(sc, reg, wb_write, 2);
}
151 | static void ecore_init_wr_zp(struct bnx2x_softc *sc, uint32_t addr, uint32_t len, | |
152 | uint32_t blob_off) | |
153 | { | |
154 | const uint8_t *data = NULL; | |
155 | int rc; | |
156 | uint32_t i; | |
157 | ||
158 | data = ecore_sel_blob(sc, addr, data) + blob_off*4; | |
159 | ||
160 | rc = ecore_gunzip(sc, data, len); | |
161 | if (rc) | |
162 | return; | |
163 | ||
164 | /* gunzip_outlen is in dwords */ | |
165 | len = GUNZIP_OUTLEN(sc); | |
166 | for (i = 0; i < len; i++) | |
167 | ((uint32_t *)GUNZIP_BUF(sc))[i] = (uint32_t) | |
168 | ECORE_CPU_TO_LE32(((uint32_t *)GUNZIP_BUF(sc))[i]); | |
169 | ||
170 | ecore_write_big_buf_wb(sc, addr, len); | |
171 | } | |
172 | ||
173 | static void ecore_init_block(struct bnx2x_softc *sc, uint32_t block, uint32_t stage) | |
174 | { | |
175 | uint16_t op_start = | |
176 | INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage, | |
177 | STAGE_START)]; | |
178 | uint16_t op_end = | |
179 | INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage, | |
180 | STAGE_END)]; | |
181 | const union init_op *op; | |
182 | uint32_t op_idx, op_type, addr, len; | |
183 | const uint32_t *data, *data_base; | |
184 | ||
185 | /* If empty block */ | |
186 | if (op_start == op_end) | |
187 | return; | |
188 | ||
189 | data_base = INIT_DATA(sc); | |
190 | ||
191 | for (op_idx = op_start; op_idx < op_end; op_idx++) { | |
192 | ||
193 | op = (const union init_op *)&(INIT_OPS(sc)[op_idx]); | |
194 | /* Get generic data */ | |
195 | op_type = op->raw.op; | |
196 | addr = op->raw.offset; | |
197 | /* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and | |
198 | * OP_WR64 (we assume that op_arr_write and op_write have the | |
199 | * same structure). | |
200 | */ | |
201 | len = op->arr_wr.data_len; | |
202 | data = data_base + op->arr_wr.data_off; | |
203 | ||
204 | switch (op_type) { | |
205 | case OP_RD: | |
206 | REG_RD(sc, addr); | |
207 | break; | |
208 | case OP_WR: | |
209 | REG_WR(sc, addr, op->write.val); | |
210 | break; | |
211 | case OP_SW: | |
212 | ecore_init_str_wr(sc, addr, data, len); | |
213 | break; | |
214 | case OP_WB: | |
215 | ecore_init_wr_wb(sc, addr, data, len); | |
216 | break; | |
217 | case OP_ZR: | |
218 | case OP_WB_ZR: | |
219 | ecore_init_fill(sc, addr, 0, op->zero.len); | |
220 | break; | |
221 | case OP_ZP: | |
222 | ecore_init_wr_zp(sc, addr, len, op->arr_wr.data_off); | |
223 | break; | |
224 | case OP_WR_64: | |
225 | ecore_init_wr_64(sc, addr, data, len); | |
226 | break; | |
227 | case OP_IF_MODE_AND: | |
228 | /* if any of the flags doesn't match, skip the | |
229 | * conditional block. | |
230 | */ | |
231 | if ((INIT_MODE_FLAGS(sc) & | |
232 | op->if_mode.mode_bit_map) != | |
233 | op->if_mode.mode_bit_map) | |
234 | op_idx += op->if_mode.cmd_offset; | |
235 | break; | |
236 | case OP_IF_MODE_OR: | |
237 | /* if all the flags don't match, skip the conditional | |
238 | * block. | |
239 | */ | |
240 | if ((INIT_MODE_FLAGS(sc) & | |
241 | op->if_mode.mode_bit_map) == 0) | |
242 | op_idx += op->if_mode.cmd_offset; | |
243 | break; | |
244 | /* the following opcodes are unused at the moment. */ | |
245 | case OP_IF_PHASE: | |
246 | case OP_RT: | |
247 | case OP_DELAY: | |
248 | case OP_VERIFY: | |
249 | default: | |
250 | /* Should never get here! */ | |
251 | ||
252 | break; | |
253 | } | |
254 | } | |
255 | } | |
256 | ||
257 | ||
/****************************************************************************
 * PXP Arbiter
 ****************************************************************************/
/*
 * This code configures the PCI read/write arbiter
 * which implements a weighted round robin
 * between the virtual queues in the chip.
 *
 * The values were derived for each PCI max payload and max request size.
 * since max payload and max request size are only known at run time,
 * this is done as a separate init stage.
 */

#define NUM_WR_Q	13	/* number of write arbiter queues */
#define NUM_RD_Q	29	/* number of read arbiter queues */
#define MAX_RD_ORD	3	/* max supported read request size order */
#define MAX_WR_ORD	2	/* max supported write payload size order */
/* configuration for one arbiter queue */
struct arb_line {
	int l;		/* weight ("L") register value */
	int add;	/* additive increment register value */
	int ubound;	/* upper bound register value */
};
283 | /* derived configuration for each read queue for each max request size */ | |
284 | static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = { | |
285 | /* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} }, | |
286 | { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} }, | |
287 | { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} }, | |
288 | { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} }, | |
289 | { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} }, | |
290 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, | |
291 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, | |
292 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, | |
293 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, | |
294 | /* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
295 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
296 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
297 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
298 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
299 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
300 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
301 | { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} }, | |
302 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
303 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
304 | /* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
305 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
306 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
307 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
308 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
309 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
310 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
311 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
312 | { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, | |
313 | { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} } | |
314 | }; | |
315 | ||
316 | /* derived configuration for each write queue for each max request size */ | |
317 | static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = { | |
318 | /* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} }, | |
319 | { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} }, | |
320 | { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, | |
321 | { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, | |
322 | { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, | |
323 | { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, | |
324 | { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} }, | |
325 | { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, | |
326 | { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, | |
327 | /* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} }, | |
328 | { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} }, | |
329 | { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} }, | |
330 | { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} } | |
331 | }; | |
332 | ||
333 | /* register addresses for read queues */ | |
334 | static const struct arb_line read_arb_addr[NUM_RD_Q-1] = { | |
335 | /* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0, | |
336 | PXP2_REG_RQ_BW_RD_UBOUND0}, | |
337 | {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1, | |
338 | PXP2_REG_PSWRQ_BW_UB1}, | |
339 | {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2, | |
340 | PXP2_REG_PSWRQ_BW_UB2}, | |
341 | {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3, | |
342 | PXP2_REG_PSWRQ_BW_UB3}, | |
343 | {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4, | |
344 | PXP2_REG_RQ_BW_RD_UBOUND4}, | |
345 | {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5, | |
346 | PXP2_REG_RQ_BW_RD_UBOUND5}, | |
347 | {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6, | |
348 | PXP2_REG_PSWRQ_BW_UB6}, | |
349 | {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7, | |
350 | PXP2_REG_PSWRQ_BW_UB7}, | |
351 | {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8, | |
352 | PXP2_REG_PSWRQ_BW_UB8}, | |
353 | /* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9, | |
354 | PXP2_REG_PSWRQ_BW_UB9}, | |
355 | {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10, | |
356 | PXP2_REG_PSWRQ_BW_UB10}, | |
357 | {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11, | |
358 | PXP2_REG_PSWRQ_BW_UB11}, | |
359 | {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12, | |
360 | PXP2_REG_RQ_BW_RD_UBOUND12}, | |
361 | {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13, | |
362 | PXP2_REG_RQ_BW_RD_UBOUND13}, | |
363 | {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14, | |
364 | PXP2_REG_RQ_BW_RD_UBOUND14}, | |
365 | {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15, | |
366 | PXP2_REG_RQ_BW_RD_UBOUND15}, | |
367 | {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16, | |
368 | PXP2_REG_RQ_BW_RD_UBOUND16}, | |
369 | {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17, | |
370 | PXP2_REG_RQ_BW_RD_UBOUND17}, | |
371 | {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18, | |
372 | PXP2_REG_RQ_BW_RD_UBOUND18}, | |
373 | /* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19, | |
374 | PXP2_REG_RQ_BW_RD_UBOUND19}, | |
375 | {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20, | |
376 | PXP2_REG_RQ_BW_RD_UBOUND20}, | |
377 | {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22, | |
378 | PXP2_REG_RQ_BW_RD_UBOUND22}, | |
379 | {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23, | |
380 | PXP2_REG_RQ_BW_RD_UBOUND23}, | |
381 | {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24, | |
382 | PXP2_REG_RQ_BW_RD_UBOUND24}, | |
383 | {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25, | |
384 | PXP2_REG_RQ_BW_RD_UBOUND25}, | |
385 | {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26, | |
386 | PXP2_REG_RQ_BW_RD_UBOUND26}, | |
387 | {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27, | |
388 | PXP2_REG_RQ_BW_RD_UBOUND27}, | |
389 | {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28, | |
390 | PXP2_REG_PSWRQ_BW_UB28} | |
391 | }; | |
392 | ||
393 | /* register addresses for write queues */ | |
394 | static const struct arb_line write_arb_addr[NUM_WR_Q-1] = { | |
395 | /* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1, | |
396 | PXP2_REG_PSWRQ_BW_UB1}, | |
397 | {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2, | |
398 | PXP2_REG_PSWRQ_BW_UB2}, | |
399 | {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3, | |
400 | PXP2_REG_PSWRQ_BW_UB3}, | |
401 | {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6, | |
402 | PXP2_REG_PSWRQ_BW_UB6}, | |
403 | {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7, | |
404 | PXP2_REG_PSWRQ_BW_UB7}, | |
405 | {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8, | |
406 | PXP2_REG_PSWRQ_BW_UB8}, | |
407 | {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9, | |
408 | PXP2_REG_PSWRQ_BW_UB9}, | |
409 | {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10, | |
410 | PXP2_REG_PSWRQ_BW_UB10}, | |
411 | {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11, | |
412 | PXP2_REG_PSWRQ_BW_UB11}, | |
413 | /* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28, | |
414 | PXP2_REG_PSWRQ_BW_UB28}, | |
415 | {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29, | |
416 | PXP2_REG_RQ_BW_WR_UBOUND29}, | |
417 | {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30, | |
418 | PXP2_REG_RQ_BW_WR_UBOUND30} | |
419 | }; | |
420 | ||
421 | static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order, | |
422 | int w_order) | |
423 | { | |
424 | uint32_t val, i; | |
425 | ||
426 | if (r_order > MAX_RD_ORD) { | |
9f95a23c | 427 | ECORE_MSG(sc, "read order of %d order adjusted to %d", |
7c673cae FG |
428 | r_order, MAX_RD_ORD); |
429 | r_order = MAX_RD_ORD; | |
430 | } | |
431 | if (w_order > MAX_WR_ORD) { | |
9f95a23c | 432 | ECORE_MSG(sc, "write order of %d order adjusted to %d", |
7c673cae FG |
433 | w_order, MAX_WR_ORD); |
434 | w_order = MAX_WR_ORD; | |
435 | } | |
436 | if (CHIP_REV_IS_FPGA(sc)) { | |
9f95a23c | 437 | ECORE_MSG(sc, "write order adjusted to 1 for FPGA"); |
7c673cae FG |
438 | w_order = 0; |
439 | } | |
9f95a23c | 440 | ECORE_MSG(sc, "read order %d write order %d", r_order, w_order); |
7c673cae FG |
441 | |
442 | for (i = 0; i < NUM_RD_Q-1; i++) { | |
443 | REG_WR(sc, read_arb_addr[i].l, read_arb_data[i][r_order].l); | |
444 | REG_WR(sc, read_arb_addr[i].add, | |
445 | read_arb_data[i][r_order].add); | |
446 | REG_WR(sc, read_arb_addr[i].ubound, | |
447 | read_arb_data[i][r_order].ubound); | |
448 | } | |
449 | ||
450 | for (i = 0; i < NUM_WR_Q-1; i++) { | |
451 | if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) || | |
452 | (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) { | |
453 | ||
454 | REG_WR(sc, write_arb_addr[i].l, | |
455 | write_arb_data[i][w_order].l); | |
456 | ||
457 | REG_WR(sc, write_arb_addr[i].add, | |
458 | write_arb_data[i][w_order].add); | |
459 | ||
460 | REG_WR(sc, write_arb_addr[i].ubound, | |
461 | write_arb_data[i][w_order].ubound); | |
462 | } else { | |
463 | ||
464 | val = REG_RD(sc, write_arb_addr[i].l); | |
465 | REG_WR(sc, write_arb_addr[i].l, | |
466 | val | (write_arb_data[i][w_order].l << 10)); | |
467 | ||
468 | val = REG_RD(sc, write_arb_addr[i].add); | |
469 | REG_WR(sc, write_arb_addr[i].add, | |
470 | val | (write_arb_data[i][w_order].add << 10)); | |
471 | ||
472 | val = REG_RD(sc, write_arb_addr[i].ubound); | |
473 | REG_WR(sc, write_arb_addr[i].ubound, | |
474 | val | (write_arb_data[i][w_order].ubound << 7)); | |
475 | } | |
476 | } | |
477 | ||
478 | val = write_arb_data[NUM_WR_Q-1][w_order].add; | |
479 | val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10; | |
480 | val += write_arb_data[NUM_WR_Q-1][w_order].l << 17; | |
481 | REG_WR(sc, PXP2_REG_PSWRQ_BW_RD, val); | |
482 | ||
483 | val = read_arb_data[NUM_RD_Q-1][r_order].add; | |
484 | val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10; | |
485 | val += read_arb_data[NUM_RD_Q-1][r_order].l << 17; | |
486 | REG_WR(sc, PXP2_REG_PSWRQ_BW_WR, val); | |
487 | ||
488 | REG_WR(sc, PXP2_REG_RQ_WR_MBS0, w_order); | |
489 | REG_WR(sc, PXP2_REG_RQ_WR_MBS1, w_order); | |
490 | REG_WR(sc, PXP2_REG_RQ_RD_MBS0, r_order); | |
491 | REG_WR(sc, PXP2_REG_RQ_RD_MBS1, r_order); | |
492 | ||
493 | if (CHIP_IS_E1H(sc) && (r_order == MAX_RD_ORD)) | |
494 | REG_WR(sc, PXP2_REG_RQ_PDR_LIMIT, 0xe00); | |
495 | ||
496 | if (CHIP_IS_E3(sc)) | |
497 | REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order)); | |
498 | else if (CHIP_IS_E2(sc)) | |
499 | REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order)); | |
500 | else | |
501 | REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); | |
502 | ||
503 | /* MPS w_order optimal TH presently TH | |
504 | * 128 0 0 2 | |
505 | * 256 1 1 3 | |
506 | * >=512 2 2 3 | |
507 | */ | |
508 | /* DMAE is special */ | |
509 | if (!CHIP_IS_E1H(sc)) { | |
510 | /* E2 can use optimal TH */ | |
511 | val = w_order; | |
512 | REG_WR(sc, PXP2_REG_WR_DMAE_MPS, val); | |
513 | } else { | |
514 | val = ((w_order == 0) ? 2 : 3); | |
515 | REG_WR(sc, PXP2_REG_WR_DMAE_MPS, 2); | |
516 | } | |
517 | ||
518 | REG_WR(sc, PXP2_REG_WR_HC_MPS, val); | |
519 | REG_WR(sc, PXP2_REG_WR_USDM_MPS, val); | |
520 | REG_WR(sc, PXP2_REG_WR_CSDM_MPS, val); | |
521 | REG_WR(sc, PXP2_REG_WR_TSDM_MPS, val); | |
522 | REG_WR(sc, PXP2_REG_WR_XSDM_MPS, val); | |
523 | REG_WR(sc, PXP2_REG_WR_QM_MPS, val); | |
524 | REG_WR(sc, PXP2_REG_WR_TM_MPS, val); | |
525 | REG_WR(sc, PXP2_REG_WR_SRC_MPS, val); | |
526 | REG_WR(sc, PXP2_REG_WR_DBG_MPS, val); | |
527 | REG_WR(sc, PXP2_REG_WR_CDU_MPS, val); | |
528 | ||
529 | /* Validate number of tags suppoted by device */ | |
530 | #define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980 | |
531 | val = REG_RD(sc, PCIE_REG_PCIER_TL_HDR_FC_ST); | |
532 | val &= 0xFF; | |
533 | if (val <= 0x20) | |
534 | REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x20); | |
535 | } | |
536 | ||
/****************************************************************************
 * ILT management
 ****************************************************************************/
/*
 * This code hides the low level HW interaction for ILT management and
 * configuration. The API consists of a shadow ILT table which is set by the
 * driver and a set of routines to use it to configure the HW.
 *
 */

/* ILT HW init operations */

/* ILT memory management operations */
#define ILT_MEMOP_ALLOC		0
#define ILT_MEMOP_FREE		1

/* the phys address is shifted right 12 bits and has an added
 * 1=valid bit added to the 53rd bit
 * then since this is a wide register(TM)
 * we split it into two 32 bit writes
 */
#define ILT_ADDR1(x)		((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF))
#define ILT_ADDR2(x)		((uint32_t)((1 << 20) | ((uint64_t)x >> 44)))
#define ILT_RANGE(f, l)		(((l) << 10) | f)
562 | static int ecore_ilt_line_mem_op(struct bnx2x_softc *sc, | |
563 | struct ilt_line *line, uint32_t size, uint8_t memop, int cli_num, int i) | |
564 | { | |
565 | #define ECORE_ILT_NAMESIZE 10 | |
566 | char str[ECORE_ILT_NAMESIZE]; | |
567 | ||
568 | if (memop == ILT_MEMOP_FREE) { | |
569 | ECORE_ILT_FREE(line->page, line->page_mapping, line->size); | |
570 | return 0; | |
571 | } | |
572 | snprintf(str, ECORE_ILT_NAMESIZE, "ILT_%d_%d", cli_num, i); | |
573 | ECORE_ILT_ZALLOC(line->page, &line->page_mapping, size, str); | |
574 | if (!line->page) | |
575 | return -1; | |
576 | line->size = size; | |
577 | return 0; | |
578 | } | |
579 | ||
580 | ||
581 | static int ecore_ilt_client_mem_op(struct bnx2x_softc *sc, int cli_num, | |
582 | uint8_t memop) | |
583 | { | |
584 | int i, rc = 0; | |
585 | struct ecore_ilt *ilt = SC_ILT(sc); | |
586 | struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; | |
587 | ||
588 | if (!ilt || !ilt->lines) | |
589 | return -1; | |
590 | ||
591 | if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM)) | |
592 | return 0; | |
593 | ||
594 | for (i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) { | |
595 | rc = ecore_ilt_line_mem_op(sc, &ilt->lines[i], | |
596 | ilt_cli->page_size, memop, cli_num, i); | |
597 | } | |
598 | return rc; | |
599 | } | |
600 | ||
601 | static inline int ecore_ilt_mem_op_cnic(struct bnx2x_softc *sc, uint8_t memop) | |
602 | { | |
603 | int rc = 0; | |
604 | ||
605 | if (CONFIGURE_NIC_MODE(sc)) | |
606 | rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop); | |
607 | if (!rc) | |
608 | rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_TM, memop); | |
609 | ||
610 | return rc; | |
611 | } | |
612 | ||
613 | static int ecore_ilt_mem_op(struct bnx2x_softc *sc, uint8_t memop) | |
614 | { | |
615 | int rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_CDU, memop); | |
616 | if (!rc) | |
617 | rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_QM, memop); | |
618 | if (!rc && CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc)) | |
619 | rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop); | |
620 | ||
621 | return rc; | |
622 | } | |
623 | ||
624 | static void ecore_ilt_line_wr(struct bnx2x_softc *sc, int abs_idx, | |
625 | ecore_dma_addr_t page_mapping) | |
626 | { | |
627 | uint32_t reg; | |
628 | ||
629 | reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8; | |
630 | ||
631 | ecore_wr_64(sc, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); | |
632 | } | |
633 | ||
634 | static void ecore_ilt_line_init_op(struct bnx2x_softc *sc, | |
635 | struct ecore_ilt *ilt, int idx, uint8_t initop) | |
636 | { | |
637 | ecore_dma_addr_t null_mapping; | |
638 | int abs_idx = ilt->start_line + idx; | |
639 | ||
640 | switch (initop) { | |
641 | case INITOP_INIT: | |
642 | /* set in the init-value array */ | |
643 | case INITOP_SET: | |
644 | ecore_ilt_line_wr(sc, abs_idx, ilt->lines[idx].page_mapping); | |
645 | break; | |
646 | case INITOP_CLEAR: | |
647 | null_mapping = 0; | |
648 | ecore_ilt_line_wr(sc, abs_idx, null_mapping); | |
649 | break; | |
650 | } | |
651 | } | |
652 | ||
653 | static void ecore_ilt_boundry_init_op(struct bnx2x_softc *sc, | |
654 | struct ilt_client_info *ilt_cli, | |
655 | uint32_t ilt_start) | |
656 | { | |
657 | uint32_t start_reg = 0; | |
658 | uint32_t end_reg = 0; | |
659 | ||
660 | /* The boundary is either SET or INIT, | |
661 | CLEAR => SET and for now SET ~~ INIT */ | |
662 | ||
663 | /* find the appropriate regs */ | |
664 | switch (ilt_cli->client_num) { | |
665 | case ILT_CLIENT_CDU: | |
666 | start_reg = PXP2_REG_RQ_CDU_FIRST_ILT; | |
667 | end_reg = PXP2_REG_RQ_CDU_LAST_ILT; | |
668 | break; | |
669 | case ILT_CLIENT_QM: | |
670 | start_reg = PXP2_REG_RQ_QM_FIRST_ILT; | |
671 | end_reg = PXP2_REG_RQ_QM_LAST_ILT; | |
672 | break; | |
673 | case ILT_CLIENT_SRC: | |
674 | start_reg = PXP2_REG_RQ_SRC_FIRST_ILT; | |
675 | end_reg = PXP2_REG_RQ_SRC_LAST_ILT; | |
676 | break; | |
677 | case ILT_CLIENT_TM: | |
678 | start_reg = PXP2_REG_RQ_TM_FIRST_ILT; | |
679 | end_reg = PXP2_REG_RQ_TM_LAST_ILT; | |
680 | break; | |
681 | } | |
682 | REG_WR(sc, start_reg, (ilt_start + ilt_cli->start)); | |
683 | REG_WR(sc, end_reg, (ilt_start + ilt_cli->end)); | |
684 | } | |
685 | ||
686 | static void ecore_ilt_client_init_op_ilt(struct bnx2x_softc *sc, | |
687 | struct ecore_ilt *ilt, | |
688 | struct ilt_client_info *ilt_cli, | |
689 | uint8_t initop) | |
690 | { | |
691 | int i; | |
692 | ||
693 | if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) | |
694 | return; | |
695 | ||
696 | for (i = ilt_cli->start; i <= ilt_cli->end; i++) | |
697 | ecore_ilt_line_init_op(sc, ilt, i, initop); | |
698 | ||
699 | /* init/clear the ILT boundries */ | |
700 | ecore_ilt_boundry_init_op(sc, ilt_cli, ilt->start_line); | |
701 | } | |
702 | ||
/* Convenience wrapper: apply @initop to @ilt_cli using the softc's ILT. */
static void ecore_ilt_client_init_op(struct bnx2x_softc *sc,
				     struct ilt_client_info *ilt_cli,
				     uint8_t initop)
{
	struct ecore_ilt *ilt = SC_ILT(sc);

	ecore_ilt_client_init_op_ilt(sc, ilt, ilt_cli, initop);
}
711 | static void ecore_ilt_client_id_init_op(struct bnx2x_softc *sc, | |
712 | int cli_num, uint8_t initop) | |
713 | { | |
714 | struct ecore_ilt *ilt = SC_ILT(sc); | |
715 | struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; | |
716 | ||
717 | ecore_ilt_client_init_op(sc, ilt_cli, initop); | |
718 | } | |
719 | ||
720 | static inline void ecore_ilt_init_op_cnic(struct bnx2x_softc *sc, uint8_t initop) | |
721 | { | |
722 | if (CONFIGURE_NIC_MODE(sc)) | |
723 | ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop); | |
724 | ecore_ilt_client_id_init_op(sc, ILT_CLIENT_TM, initop); | |
725 | } | |
726 | ||
727 | static void ecore_ilt_init_op(struct bnx2x_softc *sc, uint8_t initop) | |
728 | { | |
729 | ecore_ilt_client_id_init_op(sc, ILT_CLIENT_CDU, initop); | |
730 | ecore_ilt_client_id_init_op(sc, ILT_CLIENT_QM, initop); | |
731 | if (CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc)) | |
732 | ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop); | |
733 | } | |
734 | ||
735 | static void ecore_ilt_init_client_psz(struct bnx2x_softc *sc, int cli_num, | |
736 | uint32_t psz_reg, uint8_t initop) | |
737 | { | |
738 | struct ecore_ilt *ilt = SC_ILT(sc); | |
739 | struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; | |
740 | ||
741 | if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) | |
742 | return; | |
743 | ||
744 | switch (initop) { | |
745 | case INITOP_INIT: | |
746 | /* set in the init-value array */ | |
747 | case INITOP_SET: | |
748 | REG_WR(sc, psz_reg, ILOG2(ilt_cli->page_size >> 12)); | |
749 | break; | |
750 | case INITOP_CLEAR: | |
751 | break; | |
752 | } | |
753 | } | |
754 | ||
755 | /* | |
756 | * called during init common stage, ilt clients should be initialized | |
757 | * prioir to calling this function | |
758 | */ | |
759 | static void ecore_ilt_init_page_size(struct bnx2x_softc *sc, uint8_t initop) | |
760 | { | |
761 | ecore_ilt_init_client_psz(sc, ILT_CLIENT_CDU, | |
762 | PXP2_REG_RQ_CDU_P_SIZE, initop); | |
763 | ecore_ilt_init_client_psz(sc, ILT_CLIENT_QM, | |
764 | PXP2_REG_RQ_QM_P_SIZE, initop); | |
765 | ecore_ilt_init_client_psz(sc, ILT_CLIENT_SRC, | |
766 | PXP2_REG_RQ_SRC_P_SIZE, initop); | |
767 | ecore_ilt_init_client_psz(sc, ILT_CLIENT_TM, | |
768 | PXP2_REG_RQ_TM_P_SIZE, initop); | |
769 | } | |
770 | ||
/****************************************************************************
 * QM initializations
 ****************************************************************************/
#define QM_QUEUES_PER_FUNC	16
#define QM_INIT_MIN_CID_COUNT	31
/* QM programming is only needed above the minimum CID count */
#define QM_INIT(cid_cnt)	(cid_cnt > QM_INIT_MIN_CID_COUNT)
778 | /* called during init port stage */ | |
779 | static void ecore_qm_init_cid_count(struct bnx2x_softc *sc, int qm_cid_count, | |
780 | uint8_t initop) | |
781 | { | |
782 | int port = SC_PORT(sc); | |
783 | ||
784 | if (QM_INIT(qm_cid_count)) { | |
785 | switch (initop) { | |
786 | case INITOP_INIT: | |
787 | /* set in the init-value array */ | |
788 | case INITOP_SET: | |
789 | REG_WR(sc, QM_REG_CONNNUM_0 + port*4, | |
790 | qm_cid_count/16 - 1); | |
791 | break; | |
792 | case INITOP_CLEAR: | |
793 | break; | |
794 | } | |
795 | } | |
796 | } | |
797 | ||
798 | static void ecore_qm_set_ptr_table(struct bnx2x_softc *sc, int qm_cid_count, | |
799 | uint32_t base_reg, uint32_t reg) | |
800 | { | |
801 | int i; | |
802 | uint32_t wb_data[2] = {0, 0}; | |
803 | for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) { | |
804 | REG_WR(sc, base_reg + i*4, | |
805 | qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC)); | |
806 | ecore_init_wr_wb(sc, reg + i*8, | |
807 | wb_data, 2); | |
808 | } | |
809 | } | |
810 | ||
811 | /* called during init common stage */ | |
812 | static void ecore_qm_init_ptr_table(struct bnx2x_softc *sc, int qm_cid_count, | |
813 | uint8_t initop) | |
814 | { | |
815 | if (!QM_INIT(qm_cid_count)) | |
816 | return; | |
817 | ||
818 | switch (initop) { | |
819 | case INITOP_INIT: | |
820 | /* set in the init-value array */ | |
821 | case INITOP_SET: | |
822 | ecore_qm_set_ptr_table(sc, qm_cid_count, | |
823 | QM_REG_BASEADDR, QM_REG_PTRTBL); | |
824 | if (CHIP_IS_E1H(sc)) | |
825 | ecore_qm_set_ptr_table(sc, qm_cid_count, | |
826 | QM_REG_BASEADDR_EXT_A, | |
827 | QM_REG_PTRTBL_EXT_A); | |
828 | break; | |
829 | case INITOP_CLEAR: | |
830 | break; | |
831 | } | |
832 | } | |
833 | ||
/****************************************************************************
 * SRC initializations
 ****************************************************************************/
#ifdef ECORE_L5
/* Called during init func stage: link the T2 searcher table entries into
 * a chain of DMA addresses and tell the searcher where the table begins
 * and ends for this port.
 */
static void ecore_src_init_t2(struct bnx2x_softc *sc, struct src_ent *t2,
			      ecore_dma_addr_t t2_mapping, int src_cid_count)
{
	int i;
	int port = SC_PORT(sc);

	/* Initialize T2 */
	for (i = 0; i < src_cid_count - 1; i++)
		t2[i].next = (uint64_t)(t2_mapping +
					(i + 1) * sizeof(struct src_ent));

	/* tell the searcher where the T2 table is */
	REG_WR(sc, SRC_REG_COUNTFREE0 + port * 4, src_cid_count);

	ecore_wr_64(sc, SRC_REG_FIRSTFREE0 + port * 16,
		    U64_LO(t2_mapping), U64_HI(t2_mapping));

	ecore_wr_64(sc, SRC_REG_LASTFREE0 + port * 16,
		    U64_LO((uint64_t)t2_mapping +
			   (src_cid_count - 1) * sizeof(struct src_ent)),
		    U64_HI((uint64_t)t2_mapping +
			   (src_cid_count - 1) * sizeof(struct src_ent)));
}
#endif
#endif /* ECORE_INIT_OPS_H */