/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "hfi.h"
#include "efivar.h"
#include "eprom.h"

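/*
 * Verify the checksum that firmware stores in ASIC_CFG_SCRATCH along with
 * the platform configuration bitmap: sum the bitmap version and every
 * 16-bit word of the remaining scratch registers, fold the carries, and
 * check that the result complements the stored checksum to 0xFFFF.
 * Returns 1 if the bitmap passes the check, 0 otherwise.
 */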
static int validate_scratch_checksum(struct hfi1_devdata *dd)
{
	u64 checksum = 0, temp_scratch = 0;
	int i, j, version;

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;

	/* Prevent power on default of all zeroes from passing checksum */
	if (!version) {
		dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__);
		dd_dev_err(dd,
			   "%s: Please update your BIOS to support active channels\n",
			   __func__);
		return 0;
	}

	/*
	 * ASIC scratch 0 only contains the checksum and bitmap version as
	 * fields of interest, both of which are handled separately from the
	 * loop below, so skip it
	 */
	checksum += version;
	for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
		temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
		for (j = sizeof(u64); j != 0; j -= 2) {
			checksum += (temp_scratch & 0xFFFF);
			temp_scratch >>= 16;
		}
	}

	while (checksum >> 16)
		checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	temp_scratch &= CHECKSUM_SMASK;
	temp_scratch >>= CHECKSUM_SHIFT;

	if (checksum + temp_scratch == 0xFFFF)
		return 1;

	dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__);
	return 0;
}

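/*
 * Copy the per-port platform configuration fields (port type, attenuation
 * values, TX/RX presets, and QSFP max power class) out of the ASIC scratch
 * registers into the port data, and note that the configuration came from
 * the scratch registers rather than a platform configuration file.
 */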
static void save_platform_config_fields(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 temp_scratch = 0, temp_dest = 0;

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
		     PORT0_PORT_TYPE_SMASK);
	ppd->port_type = temp_dest >>
			 (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
			  PORT0_PORT_TYPE_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
		     PORT0_LOCAL_ATTEN_SMASK);
	ppd->local_atten = temp_dest >>
			   (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
			    PORT0_LOCAL_ATTEN_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
		     PORT0_REMOTE_ATTEN_SMASK);
	ppd->remote_atten = temp_dest >>
			    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
			     PORT0_REMOTE_ATTEN_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
		     PORT0_DEFAULT_ATTEN_SMASK);
	ppd->default_atten = temp_dest >>
			     (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
			      PORT0_DEFAULT_ATTEN_SHIFT);

	temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
				ASIC_CFG_SCRATCH_2);

	ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
	ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
	ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;

	ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
			       QSFP_MAX_POWER_SHIFT;

	ppd->config_from_scratch = true;
}

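/*
 * Locate the platform configuration for this device. Integrated devices
 * take it from the ASIC scratch registers when the bitmap checksum is
 * valid; discrete devices read it from the EPROM. If neither source is
 * usable, fall back to loading the default configuration file through
 * the firmware request path.
 */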
void get_platform_config(struct hfi1_devdata *dd)
{
	int ret = 0;
	u8 *temp_platform_config = NULL;
	u32 esize;

	if (is_integrated(dd)) {
		if (validate_scratch_checksum(dd)) {
			save_platform_config_fields(dd);
			return;
		}
	} else {
		ret = eprom_read_platform_config(dd,
						 (void **)&temp_platform_config,
						 &esize);
		if (!ret) {
			/* success */
			dd->platform_config.data = temp_platform_config;
			dd->platform_config.size = esize;
			return;
		}
	}
	dd_dev_err(dd,
		   "%s: Failed to get platform config, falling back to sub-optimal default file\n",
		   __func__);
	/* fall back to request firmware */
	platform_config_load = 1;
}

void free_platform_config(struct hfi1_devdata *dd)
{
	if (!platform_config_load) {
		/*
		 * The config was loaded from EFI or the EPROM; release the
		 * memory allocated by read_efi_var/eprom_read_platform_config
		 */
		kfree(dd->platform_config.data);
	}
	/*
	 * else do nothing, dispose_firmware will release
	 * struct firmware platform_config on driver exit
	 */
}

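/*
 * Read the port type for this port from the platform configuration port
 * table; default to PORT_TYPE_UNKNOWN if the lookup fails.
 */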
void get_port_type(struct hfi1_pportdata *ppd)
{
	int ret;
	u32 temp;

	ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
					PORT_TABLE_PORT_TYPE, &temp,
					4);
	if (ret) {
		ppd->port_type = PORT_TYPE_UNKNOWN;
		return;
	}
	ppd->port_type = temp;
}

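/*
 * Enable (on != 0) or disable all four TX channels of the QSFP module by
 * writing its TX control byte. Returns 0 on success, negative on error.
 */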
int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
{
	u8 tx_ctrl_byte = on ? 0x0 : 0xF;
	int ret = 0;

	ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
			 &tx_ctrl_byte, 1);
	/* we expected 1, so consider 0 an error */
	if (ret == 0)
		ret = -EIO;
	else if (ret == 1)
		ret = 0;
	return ret;
}

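/*
 * Check the module's power class against the maximum allowed by the
 * platform configuration and disable the port if it draws too much power.
 */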
static int qual_power(struct hfi1_pportdata *ppd)
{
	u32 cable_power_class = 0, power_class_max = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret = 0;

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
		SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
	if (ret)
		return ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > power_class_max)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);

	if (ppd->offline_disabled_reason ==
	    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
		dd_dev_err(
			ppd->dd,
			"%s: Port disabled due to system power restrictions\n",
			__func__);
		ret = -EPERM;
	}
	return ret;
}

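/*
 * Check the cable's nominal bit rate against the enabled link speeds and
 * disable the port if the cable cannot carry them.
 */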
static int qual_bitrate(struct hfi1_pportdata *ppd)
{
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
	    cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
	    cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	if (ppd->offline_disabled_reason ==
	    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
		dd_dev_err(
			ppd->dd,
			"%s: Cable failed bitrate check, disabling port\n",
			__func__);
		return -EPERM;
	}
	return 0;
}

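/*
 * Enable the module's higher power classes through its power control byte
 * when the cable requires more than power class 1, then wait out the
 * LPMode deassert time before continuing.
 */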
static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
{
	u8 cable_power_class = 0, power_ctrl_byte = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > QSFP_POWER_CLASS_1) {
		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];

		power_ctrl_byte |= 1;
		power_ctrl_byte &= ~(0x2);

		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
				 QSFP_PWR_CTRL_BYTE_OFFS,
				 &power_ctrl_byte, 1);
		if (ret != 1)
			return -EIO;

		if (cable_power_class > QSFP_POWER_CLASS_4) {
			power_ctrl_byte |= (1 << 2);
			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
					 QSFP_PWR_CTRL_BYTE_OFFS,
					 &power_ctrl_byte, 1);
			if (ret != 1)
				return -EIO;
		}

		/* SFF 8679 rev 1.7 LPMode Deassert time */
		msleep(300);
	}
	return 0;
}

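/*
 * Turn the module's RX clock and data recovery (CDR) on or off per the RX
 * preset table, but only when the module advertises an RX CDR that can be
 * bypassed. Modules of power class 3 or lower always get RX CDR enabled.
 */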
static void apply_rx_cdr(struct hfi1_pportdata *ppd,
			 u32 rx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 rx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
		return;

	/* RX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn RX CDR on */
		*cdr_ctrl_byte |= 0xF;
		return;
	}

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
		&rx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	rx_preset = (rx_preset | (rx_preset << 1) |
		     (rx_preset << 2) | (rx_preset << 3));

	if (rx_preset) {
		*cdr_ctrl_byte |= rx_preset;
	} else {
		*cdr_ctrl_byte &= rx_preset;
		/* Preserve current TX CDR status */
		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
	}
}

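/*
 * Same as apply_rx_cdr(), but for the module's TX CDR: honor the TX preset
 * table when the TX CDR can be bypassed, and always enable it for modules
 * of power class 3 or lower.
 */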
static void apply_tx_cdr(struct hfi1_pportdata *ppd,
			 u32 tx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 tx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
		return;

	/* TX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn TX CDR on */
		*cdr_ctrl_byte |= 0xF0;
		return;
	}

	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);

	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	tx_preset = (tx_preset | (tx_preset << 1) |
		     (tx_preset << 2) | (tx_preset << 3));

	if (tx_preset)
		*cdr_ctrl_byte |= (tx_preset << 4);
	else
		/* Preserve current/determined RX CDR status */
		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
}

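/*
 * Build the combined CDR control byte from the RX and TX preset tables and
 * write it back to the QSFP module.
 */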
static void apply_cdr_settings(
		struct hfi1_pportdata *ppd, u32 rx_preset_index,
		u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];

	apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);

	apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);

	qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
		   &cdr_ctrl_byte, 1);
}

static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 tx_eq;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
		return;
	/* Disable adaptive TX EQ if present */
	tx_eq = cache[(128 * 3) + 241];
	tx_eq &= 0xF0;
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
}

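/*
 * Program a fixed TX equalization value from the TX preset table into the
 * module, falling back to the highest value the module advertises when the
 * requested value is not supported.
 */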
static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u32 tx_preset;
	u8 tx_eq;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
		return;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
		&tx_preset, 4);
	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_EQ_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
		&tx_preset, 4);

	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX EQ %x unsupported\n",
			__func__, tx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying EQ %x\n",
			__func__, cache[608] & 0xF0);

		tx_preset = (cache[608] & 0xF0) >> 4;
	}

	tx_eq = tx_preset | (tx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
}

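/*
 * Program the RX output emphasis from the RX preset table into the module,
 * falling back to the highest value the module advertises when the
 * requested value is not supported.
 */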
static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
{
	u32 rx_preset;
	u8 rx_eq, *cache = ppd->qsfp_info.cache;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
		return;
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_EMP_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
		&rx_preset, 4);

	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: Requested RX EMP %x\n",
			__func__, rx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying supported EMP %x\n",
			__func__, cache[608] & 0xF);

		rx_preset = cache[608] & 0xF;
	}

	rx_eq = rx_preset | (rx_preset << 4);

	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
}

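/*
 * Apply all equalization settings (adaptive TX EQ, programmed TX EQ, and
 * RX emphasis) to a module that implements upper memory page 03.
 */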
static void apply_eq_settings(struct hfi1_pportdata *ppd,
			      u32 rx_preset_index, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}

	apply_tx_eq_auto(ppd);

	apply_tx_eq_prog(ppd, tx_preset_index);

	apply_rx_eq_emp(ppd, rx_preset_index);
}

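/*
 * Program the RX output amplitude from the RX preset table, falling back
 * to an amplitude the module reports as supported when the requested value
 * is not available.
 */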
static void apply_rx_amplitude_settings(
		struct hfi1_pportdata *ppd, u32 rx_preset_index,
		u32 tx_preset_index)
{
	u32 rx_preset;
	u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}

	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
				  &rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}
	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP,
				  &rx_preset, 4);

	dd_dev_info(ppd->dd,
		    "%s: Requested RX AMP %x\n",
		    __func__,
		    rx_preset);

	for (i = 0; i < 4; i++) {
		if (cache[(128 * 3) + 225] & (1 << i)) {
			preferred = i;
			if (preferred == rx_preset)
				break;
		}
	}

	/*
	 * Verify that preferred RX amplitude is not just a
	 * fall through of the default
	 */
	if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
		return;
	}

	dd_dev_info(ppd->dd,
		    "%s: Applying RX AMP %x\n", __func__, preferred);

	rx_amp = preferred | (preferred << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
}

#define OPA_INVALID_INDEX 0xFFF

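/*
 * Write the same 8051 configuration value to all four lanes of the given
 * field, logging (but not propagating) any per-lane failure.
 */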
static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
			   u32 config_data, const char *message)
{
	u8 i;
	int ret = HCMD_SUCCESS;

	for (i = 0; i < 4; i++) {
		ret = load_8051_config(ppd->dd, field_id, i, config_data);
		if (ret != HCMD_SUCCESS) {
			dd_dev_err(
				ppd->dd,
				"%s: %s for lane %u failed\n",
				message, __func__, i);
		}
	}
}

/*
 * Return a special SerDes setting for low power AOC cables. The power class
 * threshold and setting being used were all found by empirical testing.
 *
 * Summary of the logic:
 *
 * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
 *     return 0xe
 * return 0; // leave at default
 */
static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	int power_class;

	/* QSFP only */
	if (ppd->port_type != PORT_TYPE_QSFP)
		return 0; /* leave at default */

	/* active optical cables only */
	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0x0 ... 0x9: /* fallthrough */
	case 0xC: /* fallthrough */
	case 0xE:
		/* active AOC */
		power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
		if (power_class < QSFP_POWER_CLASS_4)
			return 0xe;
	}
	return 0; /* leave at default */
}

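/*
 * Hand the tuning results to the 8051 firmware: the tuning method, the
 * total channel loss, the cable's capabilities, and, for limiting active
 * cables, the TX preset (precursor, attenuation, postcursor) values.
 */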
static void apply_tunings(
		struct hfi1_pportdata *ppd, u32 tx_preset_index,
		u8 tuning_method, u32 total_atten, u8 limiting_active)
{
	int ret = 0;
	u32 config_data = 0, tx_preset = 0;
	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
	u8 *cache = ppd->qsfp_info.cache;

	/* Pass tuning method to 8051 */
	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			 &config_data);
	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			       config_data);
	if (ret != HCMD_SUCCESS)
		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
			   __func__);

	/* Set same channel loss for both TX and RX */
	config_data = 0 | (total_atten << 16) | (total_atten << 24);
	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
		       "Setting channel loss");

	/* Inform 8051 of cable capabilities */
	if (ppd->qsfp_info.cache_valid) {
		external_device_config =
			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, &config_data);
		/* Clear, then set the external device config field */
		config_data &= ~(u32)0xFF;
		config_data |= external_device_config;
		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, config_data);
		if (ret != HCMD_SUCCESS)
			dd_dev_err(ppd->dd,
				   "%s: Failed set ext device config params\n",
				   __func__);
	}

	if (tx_preset_index == OPA_INVALID_INDEX) {
		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
			dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n",
				   __func__);
		return;
	}

	/* Following for limiting active channels only */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
	precur = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
	attn = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
	postcur = tx_preset;

	/*
	 * NOTES:
	 * o The aoc_low_power_setting is applied to all lanes even
	 *   though only lane 0's value is examined by the firmware.
	 * o A lingering low power setting after a cable swap does
	 *   not occur. On cable unplug the 8051 is reset and
	 *   restarted on cable insert. This resets all settings to
	 *   their default, erasing any previous low power setting.
	 */
	config_data = precur | (attn << 8) | (postcur << 16) |
		      (aoc_low_power_setting(ppd) << 24);

	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
		       "Applying TX settings");
}

/* Must be holding the QSFP i2c resource */
static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
{
	int ret;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	ppd->qsfp_info.limiting_active = 1;

	ret = set_qsfp_tx(ppd, 0);
	if (ret)
		return ret;

	ret = qual_power(ppd);
	if (ret)
		return ret;

	ret = qual_bitrate(ppd);
	if (ret)
		return ret;

	/*
	 * We'll change the QSFP memory contents from here on out, thus we set a
	 * flag here to remind ourselves to reset the QSFP module. This prevents
	 * reuse of stale settings established in our previous pass through.
	 */
	if (ppd->qsfp_info.reset_needed) {
		reset_qsfp(ppd);
		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
	} else {
		ppd->qsfp_info.reset_needed = 1;
	}

	ret = set_qsfp_high_power(ppd);
	if (ret)
		return ret;

	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	} else {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	}

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
	if (ret) {
		*ptr_rx_preset = OPA_INVALID_INDEX;
		return ret;
	}

	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);

	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	ret = set_qsfp_tx(ppd, 1);

	return ret;
}

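/*
 * Work out the tuning parameters for the installed QSFP cable: passive
 * copper cables get a total attenuation from the cable and platform
 * configuration, active cables are tuned via tune_active_qsfp(), and
 * unknown cable types are reported and left untouched.
 */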
static int tune_qsfp(struct hfi1_pportdata *ppd,
		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
{
	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	int ret = 0;
	u8 *cache = ppd->qsfp_info.cache;

	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0xA ... 0xB:
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G,
			&platform_atten, 4);
		if (ret)
			return ret;

		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
		else if ((lss & OPA_LINK_SPEED_12_5G) &&
			 (lse & OPA_LINK_SPEED_12_5G))
			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];

		/* Fallback to configured attenuation if cable memory is bad */
		if (cable_atten == 0 || cable_atten > 36) {
			ret = get_platform_config_field(
				ppd->dd,
				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
				&cable_atten, 4);
			if (ret)
				return ret;
		}

		ret = get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
		if (ret)
			return ret;

		*ptr_total_atten = platform_atten + cable_atten + remote_atten;

		*ptr_tuning_method = OPA_PASSIVE_TUNING;
		break;
	case 0x0 ... 0x9: /* fallthrough */
	case 0xC: /* fallthrough */
	case 0xE:
		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
				       ptr_total_atten);
		if (ret)
			return ret;

		*ptr_tuning_method = OPA_ACTIVE_TUNING;
		break;
	case 0xD: /* fallthrough */
	case 0xF:
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n",
			    __func__);
		break;
	}
	return ret;
}

/*
 * This function communicates its success or failure via ppd->driver_link_ready
 * Thus, it depends on its association with start_link(...) which checks
 * driver_link_ready before proceeding with the link negotiation and
 * initialization process.
 */
void tune_serdes(struct hfi1_pportdata *ppd)
{
	int ret = 0;
	u32 total_atten = 0;
	u32 remote_atten = 0, platform_atten = 0;
	u32 rx_preset_index, tx_preset_index;
	u8 tuning_method = 0, limiting_active = 0;
	struct hfi1_devdata *dd = ppd->dd;

	rx_preset_index = OPA_INVALID_INDEX;
	tx_preset_index = OPA_INVALID_INDEX;

	/* the link defaults to enabled */
	ppd->link_enabled = 1;
	/* the driver link ready state defaults to not ready */
	ppd->driver_link_ready = 0;
	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);

	/* Skip the tuning for testing (loopback != none) and simulations */
	if (loopback != LOOPBACK_NONE ||
	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		ppd->driver_link_ready = 1;
		return;
	}

	switch (ppd->port_type) {
	case PORT_TYPE_DISCONNECTED:
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
		dd_dev_warn(dd, "%s: Port disconnected, disabling port\n",
			    __func__);
		goto bail;
	case PORT_TYPE_FIXED:
		/* platform_atten, remote_atten pre-zeroed to catch error */
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);

		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);

		total_atten = platform_atten + remote_atten;

		tuning_method = OPA_PASSIVE_TUNING;
		break;
	case PORT_TYPE_VARIABLE:
		if (qsfp_mod_present(ppd)) {
			/*
			 * platform_atten, remote_atten pre-zeroed to
			 * catch error
			 */
			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_LOCAL_ATTEN_25G,
				&platform_atten, 4);

			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_REMOTE_ATTEN_25G,
				&remote_atten, 4);

			total_atten = platform_atten + remote_atten;

			tuning_method = OPA_PASSIVE_TUNING;
		} else {
			ppd->offline_disabled_reason =
			     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
			goto bail;
		}
		break;
	case PORT_TYPE_QSFP:
		if (qsfp_mod_present(ppd)) {
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}
			refresh_qsfp_cache(ppd, &ppd->qsfp_info);

			if (ppd->qsfp_info.cache_valid) {
				ret = tune_qsfp(ppd,
						&tx_preset_index,
						&rx_preset_index,
						&tuning_method,
						&total_atten);

				/*
				 * We may have modified the QSFP memory, so
				 * update the cache to reflect the changes
				 */
				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
				limiting_active =
						ppd->qsfp_info.limiting_active;
			} else {
				dd_dev_err(dd,
					   "%s: Reading QSFP memory failed\n",
					   __func__);
				ret = -EINVAL; /* a fail indication */
			}
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
			if (ret)
				goto bail;
		} else {
			ppd->offline_disabled_reason =
			   HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
			goto bail;
		}
		break;
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__);
		ppd->port_type = PORT_TYPE_UNKNOWN;
		tuning_method = OPA_UNKNOWN_TUNING;
		total_atten = 0;
		limiting_active = 0;
		tx_preset_index = OPA_INVALID_INDEX;
		break;
	}

	if (ppd->offline_disabled_reason ==
	    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
		apply_tunings(ppd, tx_preset_index, tuning_method,
			      total_atten, limiting_active);

	if (!ret)
		ppd->driver_link_ready = 1;

	return;
bail:
	ppd->driver_link_ready = 0;
}