/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_ether.h>

#include "common.h"
#include "t4_regs.h"
/**
 * t4vf_wait_dev_ready - wait until register reads work
 *
 * Wait for the device to become ready (signified by our "who am I" register
 * returning a value other than all 1's). Return an error if it doesn't
 * become ready ...
 */
static int t4vf_wait_dev_ready(struct adapter *adapter)
{
        const u32 whoami = T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI;
        const u32 notready1 = 0xffffffff;
        const u32 notready2 = 0xeeeeeeee;
        u32 val;

        val = t4_read_reg(adapter, whoami);
        if (val != notready1 && val != notready2)
                return 0;

        msleep(500);
        val = t4_read_reg(adapter, whoami);
        if (val != notready1 && val != notready2)
                return 0;

        dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
                val);
        return -EIO;
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
                         u32 mbox_addr)
{
        for ( ; nflit; nflit--, mbox_addr += 8)
                *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/**
 * t4vf_wr_mbox_core - send a command to FW through the mailbox
 * @adapter: the adapter
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the mailbox and waits for the
 * FW to execute the command. If @rpl is not %NULL it is used to store
 * the FW's reply to the command. The command and its optional reply
 * are of the same length. FW can take up to 500 ms to respond.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure. A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error. In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4vf_wr_mbox_core(struct adapter *adapter,
                      const void __attribute__((__may_alias__)) *cmd,
                      int size, void *rpl, bool sleep_ok)
{
        /*
         * We delay in small increments at first in an effort to maintain
         * responsiveness for simple, fast executing commands but then back
         * off to larger delays to a maximum retry delay.
         */
        static const int delay[] = {
                1, 1, 3, 5, 10, 10, 20, 50, 100
        };

        u32 mbox_ctl = T4VF_CIM_BASE_ADDR + A_CIM_VF_EXT_MAILBOX_CTRL;
        __be64 cmd_rpl[MBOX_LEN / 8];
        struct mbox_entry entry;
        unsigned int delay_idx;
        u32 v, mbox_data;
        const __be64 *p;
        int i, ret;
        int ms;

        /* In T6, mailbox size is changed to 128 bytes to avoid
         * invalidating the entire prefetch buffer.
         */
        if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
                mbox_data = T4VF_MBDATA_BASE_ADDR;
        else
                mbox_data = T6VF_MBDATA_BASE_ADDR;

        /*
         * Commands must be multiples of 16 bytes in length and may not be
         * larger than the size of the Mailbox Data register array.
         */
        if ((size % 16) != 0 ||
            size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
                return -EINVAL;

        /*
         * Queue ourselves onto the mailbox access list. When our entry is at
         * the front of the list, we have rights to access the mailbox. So we
         * wait [for a while] till we're at the front [or bail out with an
         * EBUSY] ...
         */
        t4_os_atomic_add_tail(&entry, &adapter->mbox_list, &adapter->mbox_lock);

        delay_idx = 0;
        ms = delay[0];

        for (i = 0; ; i += ms) {
                /*
                 * If we've waited too long, return a busy indication. This
                 * really ought to be based on our initial position in the
                 * mailbox access list but this is a start. We very rarely
                 * contend on access to the mailbox ...
                 */
                if (i > (2 * FW_CMD_MAX_TIMEOUT)) {
                        t4_os_atomic_list_del(&entry, &adapter->mbox_list,
                                              &adapter->mbox_lock);
                        ret = -EBUSY;
                        return ret;
                }

                /*
                 * If we're at the head, break out and start the mailbox
                 * protocol.
                 */
                if (t4_os_list_first_entry(&adapter->mbox_list) == &entry)
                        break;

                /*
                 * Delay for a bit before checking again ...
                 */
                if (sleep_ok) {
                        ms = delay[delay_idx];  /* last element may repeat */
                        if (delay_idx < ARRAY_SIZE(delay) - 1)
                                delay_idx++;
                        msleep(ms);
                } else {
                        rte_delay_ms(ms);
                }
        }

        /*
         * Loop trying to get ownership of the mailbox. Return an error
         * if we can't gain ownership.
         */
        v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl));
        for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
                v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl));

        if (v != X_MBOWNER_PL) {
                t4_os_atomic_list_del(&entry, &adapter->mbox_list,
                                      &adapter->mbox_lock);
                ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
                return ret;
        }

        /*
         * Write the command array into the Mailbox Data register array and
         * transfer ownership of the mailbox to the firmware.
         */
        for (i = 0, p = cmd; i < size; i += 8)
                t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));

        t4_read_reg(adapter, mbox_data);          /* flush write */
        t4_write_reg(adapter, mbox_ctl,
                     F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
        t4_read_reg(adapter, mbox_ctl);           /* flush write */
        delay_idx = 0;
        ms = delay[0];

        /*
         * Spin waiting for firmware to acknowledge processing our command.
         */
        for (i = 0; i < FW_CMD_MAX_TIMEOUT; i++) {
                if (sleep_ok) {
                        ms = delay[delay_idx];  /* last element may repeat */
                        if (delay_idx < ARRAY_SIZE(delay) - 1)
                                delay_idx++;
                        msleep(ms);
                } else {
                        rte_delay_ms(ms);
                }

                /*
                 * If we're the owner, see if this is the reply we wanted.
                 */
                v = t4_read_reg(adapter, mbox_ctl);
                if (G_MBOWNER(v) == X_MBOWNER_PL) {
                        /*
                         * If the Message Valid bit isn't on, revoke ownership
                         * of the mailbox and continue waiting for our reply.
                         */
                        if ((v & F_MBMSGVALID) == 0) {
                                t4_write_reg(adapter, mbox_ctl,
                                             V_MBOWNER(X_MBOWNER_NONE));
                                continue;
                        }

                        /*
                         * We now have our reply. Extract the command return
                         * value, copy the reply back to our caller's buffer
                         * (if specified) and revoke ownership of the mailbox.
                         * We return the (negated) firmware command return
                         * code (this depends on FW_SUCCESS == 0). (Again we
                         * avoid clogging the log with FW_VI_STATS_CMD
                         * reply results.)
                         */

                        /*
                         * Retrieve the command reply and release the mailbox.
                         */
                        get_mbox_rpl(adapter, cmd_rpl, size / 8, mbox_data);
                        t4_write_reg(adapter, mbox_ctl,
                                     V_MBOWNER(X_MBOWNER_NONE));
                        t4_os_atomic_list_del(&entry, &adapter->mbox_list,
                                              &adapter->mbox_lock);

                        /* return value in high-order host-endian word */
                        v = be64_to_cpu(cmd_rpl[0]);

                        if (rpl) {
                                /* request bit in high-order BE word */
                                WARN_ON((be32_to_cpu(*(const u32 *)cmd)
                                         & F_FW_CMD_REQUEST) == 0);
                                memcpy(rpl, cmd_rpl, size);
                        }
                        return -((int)G_FW_CMD_RETVAL(v));
                }
        }

        /*
         * We timed out. Return the error ...
         */
        dev_err(adapter, "command %#x timed out\n",
                *(const u8 *)cmd);
        dev_err(adapter, " Control = %#x\n", t4_read_reg(adapter, mbox_ctl));
        t4_os_atomic_list_del(&entry, &adapter->mbox_list, &adapter->mbox_lock);
        ret = -ETIMEDOUT;
        return ret;
}
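
/*
 * Illustrative sketch (not part of the driver): a typical caller builds a
 * big-endian firmware command structure, sends it through the t4vf_wr_mbox()
 * wrapper and treats a negative return as the negated firmware return code.
 * The names "my_cmd"/"my_rpl" below are hypothetical placeholders.
 *
 *      struct fw_params_cmd my_cmd, my_rpl;
 *      int ret;
 *
 *      memset(&my_cmd, 0, sizeof(my_cmd));
 *      ... fill in the big-endian command fields ...
 *      ret = t4vf_wr_mbox(adapter, &my_cmd, sizeof(my_cmd), &my_rpl);
 *      if (ret < 0)
 *              dev_err(adapter, "FW command failed, fw retval %d\n", -ret);
 */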

/**
 * t4vf_fw_reset - issue a reset to FW
 * @adapter: the adapter
 *
 * Issues a reset command to FW. For a Physical Function this would
 * result in the Firmware resetting all of its state. For a Virtual
 * Function this just resets the state associated with the VF.
 */
int t4vf_fw_reset(struct adapter *adapter)
{
        struct fw_reset_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) |
                                      F_FW_CMD_WRITE);
        cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(FW_LEN16(cmd)));
        return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int t4vf_prep_adapter(struct adapter *adapter)
{
        u32 pl_vf_rev;
        int ret, ver;

        ret = t4vf_wait_dev_ready(adapter);
        if (ret < 0)
                return ret;

        /*
         * Default port and clock for debugging in case we can't reach
         * firmware.
         */
        adapter->params.nports = 1;
        adapter->params.vfres.pmask = 1;
        adapter->params.vpd.cclk = 50000;

        pl_vf_rev = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
        adapter->params.pci.device_id = adapter->pdev->id.device_id;
        adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;

        /*
         * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
         * ADAPTER (VERSION << 4 | REVISION)
         */
        ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
        adapter->params.chip = 0;
        switch (ver) {
        case CHELSIO_T5:
                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5,
                                                          pl_vf_rev);
                adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
                adapter->params.arch.mps_tcam_size =
                                NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
                break;
        case CHELSIO_T6:
                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6,
                                                          pl_vf_rev);
                adapter->params.arch.sge_fl_db = 0;
                adapter->params.arch.mps_tcam_size =
                                NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
                break;
        default:
                dev_err(adapter, "%s: Device %d is not supported\n",
                        __func__, adapter->params.pci.device_id);
                return -EINVAL;
        }
        return 0;
}
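
/*
 * Illustrative note (inferred from the switch above, not a new definition):
 * CHELSIO_CHIP_CODE() packs the chip family together with the PL_VF_REV
 * revision so that later code can branch on the family alone, e.g.
 *
 *      chip = CHELSIO_CHIP_CODE(CHELSIO_T6, pl_vf_rev);
 *      if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
 *              ... T4/T5 path ...
 *      else
 *              ... T6 path ...
 *
 * which is the same test used to pick the mailbox data base address and the
 * ingress padding shift elsewhere in this file.
 */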

/**
 * t4vf_query_params - query FW or device parameters
 * @adapter: the adapter
 * @nparams: the number of parameters
 * @params: the parameter names
 * @vals: the parameter values
 *
 * Reads the values of firmware or device parameters. Up to 7 parameters
 * can be queried at once.
 */
int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
                      const u32 *params, u32 *vals)
{
        struct fw_params_cmd cmd, rpl;
        struct fw_params_param *p;
        unsigned int i;
        size_t len16;
        int ret;

        if (nparams > 7)
                return -EINVAL;

        memset(&cmd, 0, sizeof(cmd));
        cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
                                    F_FW_CMD_REQUEST |
                                    F_FW_CMD_READ);
        len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
                                      param[nparams]), 16);
        cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
        for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
                p->mnem = cpu_to_be32(*params++);
        ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
        if (ret == 0)
                for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
                        *vals++ = be32_to_cpu(p->val);
        return ret;
}
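
/*
 * Minimal usage sketch (illustrative only): each parameter name is a packed
 * mnemonic built from V_FW_PARAMS_MNEM()/V_FW_PARAMS_PARAM_X(), exactly as
 * the helpers below construct it, e.g. reading a single device parameter:
 *
 *      u32 param, val;
 *
 *      param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *              V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK);
 *      if (t4vf_query_params(adapter, 1, &param, &val) == FW_SUCCESS)
 *              ... val now holds the core clock value reported by FW ...
 */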

/**
 * t4vf_get_vpd_params - retrieve device VPD parameters
 * @adapter: the adapter
 *
 * Retrieves various device Vital Product Data parameters. The parameters
 * are stored in @adapter->params.vpd.
 */
int t4vf_get_vpd_params(struct adapter *adapter)
{
        struct vpd_params *vpd_params = &adapter->params.vpd;
        u32 params[7], vals[7];
        int v;

        params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
                     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
        v = t4vf_query_params(adapter, 1, params, vals);
        if (v != FW_SUCCESS)
                return v;
        vpd_params->cclk = vals[0];
        dev_debug(adapter, "%s: vpd_params->cclk = %u\n",
                  __func__, vpd_params->cclk);
        return 0;
}

/**
 * t4vf_get_dev_params - retrieve device parameters
 * @adapter: the adapter
 *
 * Retrieves the firmware and TP microcode versions.
 */
int t4vf_get_dev_params(struct adapter *adapter)
{
        u32 params[7], vals[7];
        int v;

        params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
                     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
        params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
                     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
        v = t4vf_query_params(adapter, 2, params, vals);
        if (v != FW_SUCCESS)
                return v;
        adapter->params.fw_vers = vals[0];
        adapter->params.tp_vers = vals[1];

        dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
                 G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
                 G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
                 G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
                 G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));

        dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
                 G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
                 G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
                 G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
                 G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
        return 0;
}
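
/*
 * Illustrative note (an assumption about the packed layout, suggested by the
 * four G_FW_HDR_FW_VER_* accessors above rather than defined here): the
 * 32-bit version word carries major.minor.micro.build as separate fields, so
 * a value such as
 *
 *      fw_vers = 0x01170e00
 *
 * would print as "1.23.14.0" in the dev_info() calls above. The exact field
 * positions come from the G_FW_HDR_FW_VER_* macros in the firmware interface
 * headers, not from this file.
 */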

/**
 * t4vf_set_params - sets FW or device parameters
 * @adapter: the adapter
 * @nparams: the number of parameters
 * @params: the parameter names
 * @vals: the parameter values
 *
 * Sets the values of firmware or device parameters. Up to 7 parameters
 * can be specified at once.
 */
int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
                    const u32 *params, const u32 *vals)
{
        struct fw_params_param *p;
        struct fw_params_cmd cmd;
        unsigned int i;
        size_t len16;

        if (nparams > 7)
                return -EINVAL;

        memset(&cmd, 0, sizeof(cmd));
        cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
                                    F_FW_CMD_REQUEST |
                                    F_FW_CMD_WRITE);
        len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
                                      param[nparams]), 16);
        cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
        for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
                p->mnem = cpu_to_be32(*params++);
                p->val = cpu_to_be32(*vals++);
        }
        return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_fl_pkt_align - return the fl packet alignment
 * @adapter: the adapter
 *
 * T4 has a single field to specify the packing and padding boundary.
 * T5 onwards has separate fields for this and hence the alignment of
 * the next packet offset is the maximum of these two.
 */
int t4vf_fl_pkt_align(struct adapter *adapter, u32 sge_control,
                      u32 sge_control2)
{
        unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;

        /* T4 uses a single control field to specify both the PCIe Padding and
         * Packing Boundary. T5 introduced the ability to specify these
         * separately. The actual Ingress Packet Data alignment boundary
         * within Packed Buffer Mode is the maximum of these two
         * specifications.
         */
        if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
                ingpad_shift = X_INGPADBOUNDARY_SHIFT;
        else
                ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;

        ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);

        fl_align = ingpadboundary;
        if (!is_t4(adapter->params.chip)) {
                ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
                if (ingpackboundary == X_INGPACKBOUNDARY_16B)
                        ingpackboundary = 16;
                else
                        ingpackboundary = 1 << (ingpackboundary +
                                                X_INGPACKBOUNDARY_SHIFT);

                fl_align = max(ingpadboundary, ingpackboundary);
        }
        return fl_align;
}
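
/*
 * Worked example (illustrative; the shift constants come from the t4_regs
 * definitions, and the concrete numbers below are just assumed field
 * decodings): if the padding field decodes to a 32-byte boundary and the
 * packing field decodes to a 64-byte boundary on a T5/T6 part, then
 *
 *      fl_align = max(32, 64) = 64
 *
 * i.e. each packet start in a packed Free List buffer is aligned to the
 * larger of the two boundaries. On T4 only the padding boundary exists, so
 * fl_align is simply that value.
 */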

unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
{
        u32 whoami;

        whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
        return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
                G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami));
}

/**
 * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
 * @adapter: the adapter
 *
 * Retrieves global RSS mode and parameters with which we have to live
 * and stores them in the @adapter's RSS parameters.
 */
int t4vf_get_rss_glb_config(struct adapter *adapter)
{
        struct rss_params *rss = &adapter->params.rss;
        struct fw_rss_glb_config_cmd cmd, rpl;
        int v;

        /*
         * Execute an RSS Global Configuration read command to retrieve
         * our RSS configuration.
         */
        memset(&cmd, 0, sizeof(cmd));
        cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
                                      F_FW_CMD_REQUEST |
                                      F_FW_CMD_READ);
        cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
        v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
        if (v != FW_SUCCESS)
                return v;

        /*
         * Translate the big-endian RSS Global Configuration into our
         * cpu-endian format based on the RSS mode. We also do first level
         * filtering at this point to weed out modes which don't support
         * VF Drivers ...
         */
        rss->mode = G_FW_RSS_GLB_CONFIG_CMD_MODE
                        (be32_to_cpu(rpl.u.manual.mode_pkd));
        switch (rss->mode) {
        case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
                u32 word = be32_to_cpu
                                (rpl.u.basicvirtual.synmapen_to_hashtoeplitz);

                rss->u.basicvirtual.synmapen =
                        ((word & F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
                rss->u.basicvirtual.syn4tupenipv6 =
                        ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
                rss->u.basicvirtual.syn2tupenipv6 =
                        ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
                rss->u.basicvirtual.syn4tupenipv4 =
                        ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
                rss->u.basicvirtual.syn2tupenipv4 =
                        ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
                rss->u.basicvirtual.ofdmapen =
                        ((word & F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
                rss->u.basicvirtual.tnlmapen =
                        ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
                rss->u.basicvirtual.tnlalllookup =
                        ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
                rss->u.basicvirtual.hashtoeplitz =
                        ((word & F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);

                /* we need at least Tunnel Map Enable to be set */
                if (!rss->u.basicvirtual.tnlmapen)
                        return -EINVAL;
                break;
        }

        default:
                /* all unknown/unsupported RSS modes result in an error */
                return -EINVAL;
        }
        return 0;
}

/**
 * t4vf_get_vfres - retrieve VF resource limits
 * @adapter: the adapter
 *
 * Retrieves configured resource limits and capabilities for a virtual
 * function. The results are stored in @adapter->params.vfres.
 */
int t4vf_get_vfres(struct adapter *adapter)
{
        struct vf_resources *vfres = &adapter->params.vfres;
        struct fw_pfvf_cmd cmd, rpl;
        u32 word;
        int v;

        /*
         * Execute PFVF Read command to get VF resource limits; bail out early
         * with error on command failure.
         */
        memset(&cmd, 0, sizeof(cmd));
        cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
                                    F_FW_CMD_REQUEST |
                                    F_FW_CMD_READ);
        cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
        v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
        if (v != FW_SUCCESS)
                return v;

        /*
         * Extract VF resource limits and return success.
         */
        word = be32_to_cpu(rpl.niqflint_niq);
        vfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
        vfres->niq = G_FW_PFVF_CMD_NIQ(word);

        word = be32_to_cpu(rpl.type_to_neq);
        vfres->neq = G_FW_PFVF_CMD_NEQ(word);
        vfres->pmask = G_FW_PFVF_CMD_PMASK(word);

        word = be32_to_cpu(rpl.tc_to_nexactf);
        vfres->tc = G_FW_PFVF_CMD_TC(word);
        vfres->nvi = G_FW_PFVF_CMD_NVI(word);
        vfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);

        word = be32_to_cpu(rpl.r_caps_to_nethctrl);
        vfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
        vfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
        vfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);
        return 0;
}

/**
 * t4vf_get_port_stats_fw - collect "port" statistics via Firmware
 * @adapter: the adapter
 * @pidx: the port index
 * @p: the stats structure to fill
 *
 * Collect statistics for the "port"'s Virtual Interface via Firmware
 * commands.
 */
static int t4vf_get_port_stats_fw(struct adapter *adapter, int pidx,
                                  struct port_stats *p)
{
        struct port_info *pi = adap2pinfo(adapter, pidx);
        unsigned int rem = VI_VF_NUM_STATS;
        struct fw_vi_stats_vf fwstats;
        __be64 *fwsp = (__be64 *)&fwstats;

        /*
         * Grab the Virtual Interface statistics a chunk at a time via mailbox
         * commands. We could use a Work Request and get all of them at once
         * but that's an asynchronous interface which is awkward to use.
         */
        while (rem) {
                unsigned int ix = VI_VF_NUM_STATS - rem;
                unsigned int nstats = min(6U, rem);
                struct fw_vi_stats_cmd cmd, rpl;
                size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
                              sizeof(struct fw_vi_stats_ctl));
                size_t len16 = DIV_ROUND_UP(len, 16);
                int ret;

                memset(&cmd, 0, sizeof(cmd));
                cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_STATS_CMD) |
                                             V_FW_VI_STATS_CMD_VIID(pi->viid) |
                                             F_FW_CMD_REQUEST |
                                             F_FW_CMD_READ);
                cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
                cmd.u.ctl.nstats_ix =
                        cpu_to_be16(V_FW_VI_STATS_CMD_IX(ix) |
                                    V_FW_VI_STATS_CMD_NSTATS(nstats));
                ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
                if (ret != FW_SUCCESS)
                        return ret;

                memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);

                rem -= nstats;
                fwsp += nstats;
        }

        /*
         * Translate firmware statistics into host native statistics.
         */
        p->tx_octets = be64_to_cpu(fwstats.tx_bcast_bytes) +
                       be64_to_cpu(fwstats.tx_mcast_bytes) +
                       be64_to_cpu(fwstats.tx_ucast_bytes);
        p->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
        p->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
        p->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
        p->tx_drop = be64_to_cpu(fwstats.tx_drop_frames);

        p->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
        p->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
        p->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
        p->rx_len_err = be64_to_cpu(fwstats.rx_err_frames);

        return 0;
}

/**
 * t4vf_get_port_stats - collect "port" statistics
 * @adapter: the adapter
 * @pidx: the port index
 * @p: the stats structure to fill
 *
 * Collect statistics for the "port"'s Virtual Interface.
 */
void t4vf_get_port_stats(struct adapter *adapter, int pidx,
                         struct port_stats *p)
{
        /*
         * If this is not the first Virtual Interface for our Virtual
         * Function, we need to use Firmware commands to retrieve its
         * MPS statistics.
         */
        if (pidx != 0) {
                t4vf_get_port_stats_fw(adapter, pidx, p);
                return;
        }

        /*
         * But for the first VI, we can grab its statistics via the MPS
         * register mapped into the VF register space.
         */
#define GET_STAT(name) \
        t4_read_reg64(adapter, \
                      T4VF_MPS_BASE_ADDR + A_MPS_VF_STAT_##name##_L)
        p->tx_octets = GET_STAT(TX_VF_BCAST_BYTES) +
                       GET_STAT(TX_VF_MCAST_BYTES) +
                       GET_STAT(TX_VF_UCAST_BYTES);
        p->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
        p->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
        p->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
        p->tx_drop = GET_STAT(TX_VF_DROP_FRAMES);

        p->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
        p->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
        p->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);

        p->rx_len_err = GET_STAT(RX_VF_ERR_FRAMES);
#undef GET_STAT
}
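
/*
 * Illustrative note (an expansion of the helper macro above, nothing new):
 * GET_STAT(TX_VF_BCAST_BYTES) performs a 64-bit read of the low/high
 * register pair starting at
 *
 *      T4VF_MPS_BASE_ADDR + A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L
 *
 * via t4_read_reg64(), so the first VI's counters are gathered with plain
 * register reads instead of mailbox round trips.
 */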

static int t4vf_alloc_vi(struct adapter *adapter, int port_id)
{
        struct fw_vi_cmd cmd, rpl;
        int v;

        /*
         * Execute a VI command to allocate Virtual Interface and return its
         * VIID.
         */
        memset(&cmd, 0, sizeof(cmd));
        cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
                                    F_FW_CMD_REQUEST |
                                    F_FW_CMD_WRITE |
                                    F_FW_CMD_EXEC);
        cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
                                         F_FW_VI_CMD_ALLOC);
        cmd.portid_pkd = V_FW_VI_CMD_PORTID(port_id);
        v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
        if (v != FW_SUCCESS)
                return v;
        return G_FW_VI_CMD_VIID(be16_to_cpu(rpl.type_to_viid));
}

int t4vf_port_init(struct adapter *adapter)
{
        unsigned int fw_caps = adapter->params.fw_caps_support;
        struct fw_port_cmd port_cmd, port_rpl;
        struct fw_vi_cmd vi_cmd, vi_rpl;
        fw_port_cap32_t pcaps, acaps;
        enum fw_port_type port_type;
        int mdio_addr;
        int ret, i;

        for_each_port(adapter, i) {
                struct port_info *p = adap2pinfo(adapter, i);

                /*
                 * If we haven't yet determined if we're talking to Firmware
                 * which knows the new 32-bit Port Caps, it's time to find
                 * out now. This will also tell new Firmware to send us Port
                 * Status Updates using the new 32-bit Port Capabilities
                 * version of the Port Information message.
                 */
                if (fw_caps == FW_CAPS_UNKNOWN) {
                        u32 param, val;

                        param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
                                 V_FW_PARAMS_PARAM_X
                                        (FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
                        val = 1;
                        ret = t4vf_set_params(adapter, 1, &param, &val);
                        fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
                        adapter->params.fw_caps_support = fw_caps;
                }

                ret = t4vf_alloc_vi(adapter, p->port_id);
                if (ret < 0) {
                        dev_err(adapter, "cannot allocate VI for port %d: err=%d\n",
                                p->port_id, ret);
                        return ret;
                }
                p->viid = ret;

                /*
                 * Execute a VI Read command to get our Virtual Interface
                 * information like MAC address, etc.
                 */
                memset(&vi_cmd, 0, sizeof(vi_cmd));
                vi_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
                                               F_FW_CMD_REQUEST |
                                               F_FW_CMD_READ);
                vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
                vi_cmd.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(p->viid));
                ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
                if (ret != FW_SUCCESS)
                        return ret;

                p->rss_size = G_FW_VI_CMD_RSSSIZE
                                (be16_to_cpu(vi_rpl.norss_rsssize));
                t4_os_set_hw_addr(adapter, i, vi_rpl.mac);

                /*
                 * If we don't have read access to our port information, we're
                 * done now. Else, execute a PORT Read command to get it ...
                 */
                if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
                        return 0;

                memset(&port_cmd, 0, sizeof(port_cmd));
                port_cmd.op_to_portid = cpu_to_be32
                        (V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
                         F_FW_CMD_READ |
                         V_FW_PORT_CMD_PORTID(p->port_id));
                port_cmd.action_to_len16 = cpu_to_be32
                        (V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
                                              FW_PORT_ACTION_GET_PORT_INFO :
                                              FW_PORT_ACTION_GET_PORT_INFO32) |
                         FW_LEN16(port_cmd));
                ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd),
                                   &port_rpl);
                if (ret != FW_SUCCESS)
                        return ret;

                /*
                 * Extract the various fields from the Port Information message.
                 */
                if (fw_caps == FW_CAPS16) {
                        u32 lstatus = be32_to_cpu
                                (port_rpl.u.info.lstatus_to_modtype);

                        port_type = G_FW_PORT_CMD_PTYPE(lstatus);
                        mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP) ?
                                     (int)G_FW_PORT_CMD_MDIOADDR(lstatus) :
                                     -1);
                        pcaps = fwcaps16_to_caps32
                                (be16_to_cpu(port_rpl.u.info.pcap));
                        acaps = fwcaps16_to_caps32
                                (be16_to_cpu(port_rpl.u.info.acap));
                } else {
                        u32 lstatus32 = be32_to_cpu
                                (port_rpl.u.info32.lstatus32_to_cbllen32);

                        port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
                        mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
                                     (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
                                     -1);
                        pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32);
                        acaps = be32_to_cpu(port_rpl.u.info32.acaps32);
                }

                p->port_type = port_type;
                p->mdio_addr = mdio_addr;
                p->mod_type = FW_PORT_MOD_TYPE_NA;
                init_link_config(&p->link_cfg, pcaps, acaps);
        }
        return 0;
}