/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/firmware.h>

#include "hfi.h"
#include "efivar.h"
#include "eprom.h"

#define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"

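/*
 * Validate the configuration bitmap held in the ASIC_CFG_SCRATCH registers:
 * sum the 16-bit fields, fold the carries, and compare against the stored
 * checksum. Returns 1 when the scratch contents look valid, 0 when the
 * bitmap is uninitialized or corrupted.
 */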
static int validate_scratch_checksum(struct hfi1_devdata *dd)
{
	u64 checksum = 0, temp_scratch = 0;
	int i, j, version;

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;

	/* Prevent power on default of all zeroes from passing checksum */
	if (!version) {
		dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__);
		dd_dev_err(dd,
			   "%s: Please update your BIOS to support active channels\n",
			   __func__);
		return 0;
	}

	/*
	 * ASIC scratch 0 only contains the checksum and bitmap version as
	 * fields of interest, both of which are handled separately from the
	 * loop below, so skip it
	 */
	checksum += version;
	for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
		temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
		for (j = sizeof(u64); j != 0; j -= 2) {
			checksum += (temp_scratch & 0xFFFF);
			temp_scratch >>= 16;
		}
	}

	while (checksum >> 16)
		checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	temp_scratch &= CHECKSUM_SMASK;
	temp_scratch >>= CHECKSUM_SHIFT;

	if (checksum + temp_scratch == 0xFFFF)
		return 1;

	dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__);
	return 0;
}

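/*
 * Copy the platform configuration fields latched in the ASIC scratch
 * registers (port type, attenuations, presets, max power class) into the
 * per-port data, so the driver can operate without a platform config file.
 */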
static void save_platform_config_fields(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 temp_scratch = 0, temp_dest = 0;

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
		     PORT0_PORT_TYPE_SMASK);
	ppd->port_type = temp_dest >>
			 (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
			  PORT0_PORT_TYPE_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
		     PORT0_LOCAL_ATTEN_SMASK);
	ppd->local_atten = temp_dest >>
			   (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
			    PORT0_LOCAL_ATTEN_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
		     PORT0_REMOTE_ATTEN_SMASK);
	ppd->remote_atten = temp_dest >>
			    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
			     PORT0_REMOTE_ATTEN_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
		     PORT0_DEFAULT_ATTEN_SMASK);
	ppd->default_atten = temp_dest >>
			     (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
			      PORT0_DEFAULT_ATTEN_SHIFT);

	temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
				ASIC_CFG_SCRATCH_2);

	ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
	ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
	ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;

	ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
				QSFP_MAX_POWER_SHIFT;

	ppd->config_from_scratch = true;
}

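/*
 * Locate the platform configuration: integrated devices prefer the fields
 * cached in the ASIC scratch registers, other devices read the config from
 * the EPROM, and the default firmware file is used as a last resort.
 */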
void get_platform_config(struct hfi1_devdata *dd)
{
	int ret = 0;
	u8 *temp_platform_config = NULL;
	u32 esize;
	const struct firmware *platform_config_file = NULL;

	if (is_integrated(dd)) {
		if (validate_scratch_checksum(dd)) {
			save_platform_config_fields(dd);
			return;
		}
	} else {
		ret = eprom_read_platform_config(dd,
						 (void **)&temp_platform_config,
						 &esize);
		if (!ret) {
			/* success */
			dd->platform_config.data = temp_platform_config;
			dd->platform_config.size = esize;
			return;
		}
	}
	dd_dev_err(dd,
		   "%s: Failed to get platform config, falling back to sub-optimal default file\n",
		   __func__);

	ret = request_firmware(&platform_config_file,
			       DEFAULT_PLATFORM_CONFIG_NAME,
			       &dd->pcidev->dev);
	if (ret) {
		dd_dev_err(dd,
			   "%s: No default platform config file found\n",
			   __func__);
		return;
	}

	/*
	 * Allocate separate memory block to store data and free firmware
	 * structure. This allows free_platform_config to treat EPROM and
	 * fallback configs in the same manner.
	 */
	dd->platform_config.data = kmemdup(platform_config_file->data,
					   platform_config_file->size,
					   GFP_KERNEL);
	dd->platform_config.size = platform_config_file->size;
	release_firmware(platform_config_file);
}

void free_platform_config(struct hfi1_devdata *dd)
{
	/* Release memory allocated for eprom or fallback file read. */
	kfree(dd->platform_config.data);
	dd->platform_config.data = NULL;
}

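/*
 * Read the port type from the platform configuration port table, defaulting
 * to PORT_TYPE_UNKNOWN if the lookup fails.
 */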
void get_port_type(struct hfi1_pportdata *ppd)
{
	int ret;
	u32 temp;

	ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
					PORT_TABLE_PORT_TYPE, &temp,
					4);
	if (ret) {
		ppd->port_type = PORT_TYPE_UNKNOWN;
		return;
	}
	ppd->port_type = temp;
}

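/*
 * Turn the QSFP module's TX lanes on or off by writing the TX control byte
 * (0x0 enables all four lanes, 0xF disables them). Returns 0 on success or
 * -EIO if the one-byte write did not complete.
 */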
int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
{
	u8 tx_ctrl_byte = on ? 0x0 : 0xF;
	int ret = 0;

	ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
			 &tx_ctrl_byte, 1);
	/* we expected 1, so consider 0 an error */
	if (ret == 0)
		ret = -EIO;
	else if (ret == 1)
		ret = 0;
	return ret;
}

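/*
 * Check the cable's power class against the maximum allowed by the platform
 * configuration; disable the port if the cable draws more power than the
 * system supports.
 */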
static int qual_power(struct hfi1_pportdata *ppd)
{
	u32 cable_power_class = 0, power_class_max = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret = 0;

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
		SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
	if (ret)
		return ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > power_class_max)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);

	if (ppd->offline_disabled_reason ==
	    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
		dd_dev_err(
			ppd->dd,
			"%s: Port disabled due to system power restrictions\n",
			__func__);
		ret = -EPERM;
	}
	return ret;
}

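/*
 * Verify that the cable's nominal bit rate supports the enabled link speeds;
 * disable the port if it does not.
 */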
static int qual_bitrate(struct hfi1_pportdata *ppd)
{
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
	    cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
	    cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	if (ppd->offline_disabled_reason ==
	    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
		dd_dev_err(
			ppd->dd,
			"%s: Cable failed bitrate check, disabling port\n",
			__func__);
		return -EPERM;
	}
	return 0;
}

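/*
 * Take the module out of low-power mode for cables above power class 1,
 * additionally enabling operation for classes above 4, then wait the
 * SFF-8679 LPMode deassert time before proceeding.
 */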
static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
{
	u8 cable_power_class = 0, power_ctrl_byte = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > QSFP_POWER_CLASS_1) {
		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];

		power_ctrl_byte |= 1;
		power_ctrl_byte &= ~(0x2);

		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
				 QSFP_PWR_CTRL_BYTE_OFFS,
				 &power_ctrl_byte, 1);
		if (ret != 1)
			return -EIO;

		if (cable_power_class > QSFP_POWER_CLASS_4) {
			power_ctrl_byte |= (1 << 2);
			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
					 QSFP_PWR_CTRL_BYTE_OFFS,
					 &power_ctrl_byte, 1);
			if (ret != 1)
				return -EIO;
		}

		/* SFF 8679 rev 1.7 LPMode Deassert time */
		msleep(300);
	}
	return 0;
}

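/*
 * Set the RX clock-data-recovery bits of the CDR control byte per the RX
 * preset table, for cables that advertise an RX CDR with bypass support.
 * Cables of power class 3 and below always keep the RX CDR enabled.
 */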
static void apply_rx_cdr(struct hfi1_pportdata *ppd,
			 u32 rx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 rx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
		return;

	/* RX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn RX CDR on */
		*cdr_ctrl_byte |= 0xF;
		return;
	}

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
		&rx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	rx_preset = (rx_preset | (rx_preset << 1) |
		     (rx_preset << 2) | (rx_preset << 3));

	if (rx_preset) {
		*cdr_ctrl_byte |= rx_preset;
	} else {
		*cdr_ctrl_byte &= rx_preset;
		/* Preserve current TX CDR status */
		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
	}
}

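/*
 * Same as apply_rx_cdr(), but for the TX clock-data-recovery bits in the
 * upper nibble of the CDR control byte.
 */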
static void apply_tx_cdr(struct hfi1_pportdata *ppd,
			 u32 tx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 tx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
		return;

	/* TX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn TX CDR on */
		*cdr_ctrl_byte |= 0xF0;
		return;
	}

	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);

	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	tx_preset = (tx_preset | (tx_preset << 1) |
		     (tx_preset << 2) | (tx_preset << 3));

	if (tx_preset)
		*cdr_ctrl_byte |= (tx_preset << 4);
	else
		/* Preserve current/determined RX CDR status */
		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
}

static void apply_cdr_settings(
	struct hfi1_pportdata *ppd, u32 rx_preset_index,
	u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];

	apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);

	apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);

	qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
		   &cdr_ctrl_byte, 1);
}

static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 tx_eq;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
		return;
	/* Disable adaptive TX EQ if present */
	tx_eq = cache[(128 * 3) + 241];
	tx_eq &= 0xF0;
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
}

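/*
 * Apply the fixed (programmable) TX equalization from the TX preset table,
 * clamping to the highest value the module advertises as supported.
 */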
static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u32 tx_preset;
	u8 tx_eq;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
		return;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
		&tx_preset, 4);
	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_EQ_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
		&tx_preset, 4);

	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX EQ %x unsupported\n",
			__func__, tx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying EQ %x\n",
			__func__, cache[608] & 0xF0);

		tx_preset = (cache[608] & 0xF0) >> 4;
	}

	tx_eq = tx_preset | (tx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
}

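/*
 * Apply the RX output emphasis from the RX preset table, clamping to what
 * the module advertises as supported, when the module implements RX
 * emphasis control.
 */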
static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
{
	u32 rx_preset;
	u8 rx_eq, *cache = ppd->qsfp_info.cache;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
		return;
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_EMP_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
		&rx_preset, 4);

	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: Requested RX EMP %x\n",
			__func__, rx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying supported EMP %x\n",
			__func__, cache[608] & 0xF);

		rx_preset = cache[608] & 0xF;
	}

	rx_eq = rx_preset | (rx_preset << 4);

	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
}

static void apply_eq_settings(struct hfi1_pportdata *ppd,
			      u32 rx_preset_index, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}

	apply_tx_eq_auto(ppd);

	apply_tx_eq_prog(ppd, tx_preset_index);

	apply_rx_eq_emp(ppd, rx_preset_index);
}

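/*
 * Apply the RX output amplitude from the RX preset table. The requested
 * amplitude is checked against the amplitudes the module advertises as
 * supported, and a supported value is applied instead when the exact
 * request is unavailable.
 */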
static void apply_rx_amplitude_settings(
	struct hfi1_pportdata *ppd, u32 rx_preset_index,
	u32 tx_preset_index)
{
	u32 rx_preset;
	u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}

	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
				  &rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}
	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP,
				  &rx_preset, 4);

	dd_dev_info(ppd->dd,
		    "%s: Requested RX AMP %x\n",
		    __func__,
		    rx_preset);

	for (i = 0; i < 4; i++) {
		if (cache[(128 * 3) + 225] & (1 << i)) {
			preferred = i;
			if (preferred == rx_preset)
				break;
		}
	}

	/*
	 * Verify that preferred RX amplitude is not just a
	 * fall through of the default
	 */
	if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
		return;
	}

	dd_dev_info(ppd->dd,
		    "%s: Applying RX AMP %x\n", __func__, preferred);

	rx_amp = preferred | (preferred << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
}

#define OPA_INVALID_INDEX 0xFFF

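/*
 * Write the same 8051 configuration value to all four lanes for the given
 * field, logging (but not aborting on) any per-lane failure.
 */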
static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
			   u32 config_data, const char *message)
{
	u8 i;
	int ret = HCMD_SUCCESS;

	for (i = 0; i < 4; i++) {
		ret = load_8051_config(ppd->dd, field_id, i, config_data);
		if (ret != HCMD_SUCCESS) {
			dd_dev_err(
				ppd->dd,
				"%s: %s for lane %u failed\n",
				message, __func__, i);
		}
	}
}

/*
 * Return a special SerDes setting for low power AOC cables. The power class
 * threshold and setting being used were all found by empirical testing.
 *
 * Summary of the logic:
 *
 * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
 *     return 0xe
 * return 0; // leave at default
 */
static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	int power_class;

	/* QSFP only */
	if (ppd->port_type != PORT_TYPE_QSFP)
		return 0; /* leave at default */

	/* active optical cables only */
	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0x0 ... 0x9: /* fallthrough */
	case 0xC: /* fallthrough */
	case 0xE:
		/* active AOC */
		power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
		if (power_class < QSFP_POWER_CLASS_4)
			return 0xe;
	}
	return 0; /* leave at default */
}

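/*
 * Program the 8051 firmware with the tuning method, total channel loss,
 * external device capabilities, and (for limiting active cables) the TX
 * preset equalization taken from the platform configuration.
 */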
static void apply_tunings(
	struct hfi1_pportdata *ppd, u32 tx_preset_index,
	u8 tuning_method, u32 total_atten, u8 limiting_active)
{
	int ret = 0;
	u32 config_data = 0, tx_preset = 0;
	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
	u8 *cache = ppd->qsfp_info.cache;

	/* Pass tuning method to 8051 */
	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			 &config_data);
	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			       config_data);
	if (ret != HCMD_SUCCESS)
		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
			   __func__);

	/* Set same channel loss for both TX and RX */
	config_data = 0 | (total_atten << 16) | (total_atten << 24);
	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
		       "Setting channel loss");

	/* Inform 8051 of cable capabilities */
	if (ppd->qsfp_info.cache_valid) {
		external_device_config =
			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, &config_data);
		/* Clear, then set the external device config field */
		config_data &= ~(u32)0xFF;
		config_data |= external_device_config;
		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, config_data);
		if (ret != HCMD_SUCCESS)
			dd_dev_err(ppd->dd,
				   "%s: Failed set ext device config params\n",
				   __func__);
	}

	if (tx_preset_index == OPA_INVALID_INDEX) {
		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
			dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n",
				   __func__);
		return;
	}

	/* Following for limiting active channels only */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
	precur = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
	attn = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
	postcur = tx_preset;

	/*
	 * NOTES:
	 * o The aoc_low_power_setting is applied to all lanes even
	 *   though only lane 0's value is examined by the firmware.
	 * o A lingering low power setting after a cable swap does
	 *   not occur. On cable unplug the 8051 is reset and
	 *   restarted on cable insert. This resets all settings to
	 *   their default, erasing any previous low power setting.
	 */
	config_data = precur | (attn << 8) | (postcur << 16) |
		      (aoc_low_power_setting(ppd) << 24);

	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
		       "Applying TX settings");
}

/* Must be holding the QSFP i2c resource */
static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
{
	int ret;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	ppd->qsfp_info.limiting_active = 1;

	ret = set_qsfp_tx(ppd, 0);
	if (ret)
		return ret;

	ret = qual_power(ppd);
	if (ret)
		return ret;

	ret = qual_bitrate(ppd);
	if (ret)
		return ret;

	/*
	 * We'll change the QSFP memory contents from here on out, thus we set a
	 * flag here to remind ourselves to reset the QSFP module. This prevents
	 * reuse of stale settings established in our previous pass through.
	 */
	if (ppd->qsfp_info.reset_needed) {
		ret = reset_qsfp(ppd);
		if (ret)
			return ret;
		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
	} else {
		ppd->qsfp_info.reset_needed = 1;
	}

	ret = set_qsfp_high_power(ppd);
	if (ret)
		return ret;

	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	} else {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	}

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
	if (ret) {
		*ptr_rx_preset = OPA_INVALID_INDEX;
		return ret;
	}

	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);

	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	ret = set_qsfp_tx(ppd, 1);

	return ret;
}

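/*
 * Derive the tuning method and total channel attenuation from the cable
 * technology byte: passive copper uses the attenuation recorded in the
 * cable memory (or a configured default), while active cables are tuned
 * through tune_active_qsfp().
 */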
static int tune_qsfp(struct hfi1_pportdata *ppd,
		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
{
	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	int ret = 0;
	u8 *cache = ppd->qsfp_info.cache;

	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0xA ... 0xB:
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G,
			&platform_atten, 4);
		if (ret)
			return ret;

		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
		else if ((lss & OPA_LINK_SPEED_12_5G) &&
			 (lse & OPA_LINK_SPEED_12_5G))
			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];

		/* Fallback to configured attenuation if cable memory is bad */
		if (cable_atten == 0 || cable_atten > 36) {
			ret = get_platform_config_field(
				ppd->dd,
				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
				&cable_atten, 4);
			if (ret)
				return ret;
		}

		ret = get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
		if (ret)
			return ret;

		*ptr_total_atten = platform_atten + cable_atten + remote_atten;

		*ptr_tuning_method = OPA_PASSIVE_TUNING;
		break;
	case 0x0 ... 0x9: /* fallthrough */
	case 0xC: /* fallthrough */
	case 0xE:
		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
				       ptr_total_atten);
		if (ret)
			return ret;

		*ptr_tuning_method = OPA_ACTIVE_TUNING;
		break;
	case 0xD: /* fallthrough */
	case 0xF:
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n",
			    __func__);
		break;
	}
	return ret;
}

/*
 * This function communicates its success or failure via ppd->driver_link_ready
 * Thus, it depends on its association with start_link(...) which checks
 * driver_link_ready before proceeding with the link negotiation and
 * initialization process.
 */
void tune_serdes(struct hfi1_pportdata *ppd)
{
	int ret = 0;
	u32 total_atten = 0;
	u32 remote_atten = 0, platform_atten = 0;
	u32 rx_preset_index, tx_preset_index;
	u8 tuning_method = 0, limiting_active = 0;
	struct hfi1_devdata *dd = ppd->dd;

	rx_preset_index = OPA_INVALID_INDEX;
	tx_preset_index = OPA_INVALID_INDEX;

	/* the link defaults to enabled */
	ppd->link_enabled = 1;
	/* the driver link ready state defaults to not ready */
	ppd->driver_link_ready = 0;
	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);

	/* Skip the tuning for testing (loopback != none) and simulations */
	if (loopback != LOOPBACK_NONE ||
	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		ppd->driver_link_ready = 1;

		if (qsfp_mod_present(ppd)) {
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}

			refresh_qsfp_cache(ppd, &ppd->qsfp_info);
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
		}

		return;
	}

	switch (ppd->port_type) {
	case PORT_TYPE_DISCONNECTED:
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
		dd_dev_warn(dd, "%s: Port disconnected, disabling port\n",
			    __func__);
		goto bail;
	case PORT_TYPE_FIXED:
		/* platform_atten, remote_atten pre-zeroed to catch error */
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);

		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);

		total_atten = platform_atten + remote_atten;

		tuning_method = OPA_PASSIVE_TUNING;
		break;
	case PORT_TYPE_VARIABLE:
		if (qsfp_mod_present(ppd)) {
			/*
			 * platform_atten, remote_atten pre-zeroed to
			 * catch error
			 */
			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_LOCAL_ATTEN_25G,
				&platform_atten, 4);

			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_REMOTE_ATTEN_25G,
				&remote_atten, 4);

			total_atten = platform_atten + remote_atten;

			tuning_method = OPA_PASSIVE_TUNING;
		} else {
			ppd->offline_disabled_reason =
			     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
			goto bail;
		}
		break;
	case PORT_TYPE_QSFP:
		if (qsfp_mod_present(ppd)) {
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}
			refresh_qsfp_cache(ppd, &ppd->qsfp_info);

			if (ppd->qsfp_info.cache_valid) {
				ret = tune_qsfp(ppd,
						&tx_preset_index,
						&rx_preset_index,
						&tuning_method,
						&total_atten);

				/*
				 * We may have modified the QSFP memory, so
				 * update the cache to reflect the changes
				 */
				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
				limiting_active =
						ppd->qsfp_info.limiting_active;
			} else {
				dd_dev_err(dd,
					   "%s: Reading QSFP memory failed\n",
					   __func__);
				ret = -EINVAL; /* a fail indication */
			}
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
			if (ret)
				goto bail;
		} else {
			ppd->offline_disabled_reason =
			   HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
			goto bail;
		}
		break;
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__);
		ppd->port_type = PORT_TYPE_UNKNOWN;
		tuning_method = OPA_UNKNOWN_TUNING;
		total_atten = 0;
		limiting_active = 0;
		tx_preset_index = OPA_INVALID_INDEX;
		break;
	}

	if (ppd->offline_disabled_reason ==
	    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
		apply_tunings(ppd, tx_preset_index, tuning_method,
			      total_atten, limiting_active);

	if (!ret)
		ppd->driver_link_ready = 1;

	return;
bail:
	ppd->driver_link_ready = 0;
}