]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/infiniband/hw/hfi1/platform.c
Merge tag 'acpi-4.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael...
[mirror_ubuntu-bionic-kernel.git] / drivers / infiniband / hw / hfi1 / platform.c
1 /*
2 * Copyright(c) 2015, 2016 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48 #include <linux/firmware.h>
49
50 #include "hfi.h"
51 #include "efivar.h"
52 #include "eprom.h"
53
54 #define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"
55
56 static int validate_scratch_checksum(struct hfi1_devdata *dd)
57 {
58 u64 checksum = 0, temp_scratch = 0;
59 int i, j, version;
60
61 temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
62 version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;
63
64 /* Prevent power on default of all zeroes from passing checksum */
65 if (!version) {
66 dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__);
67 dd_dev_err(dd,
68 "%s: Please update your BIOS to support active channels\n",
69 __func__);
70 return 0;
71 }
72
73 /*
74 * ASIC scratch 0 only contains the checksum and bitmap version as
75 * fields of interest, both of which are handled separately from the
76 * loop below, so skip it
77 */
78 checksum += version;
79 for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
80 temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
81 for (j = sizeof(u64); j != 0; j -= 2) {
82 checksum += (temp_scratch & 0xFFFF);
83 temp_scratch >>= 16;
84 }
85 }
86
87 while (checksum >> 16)
88 checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);
89
90 temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
91 temp_scratch &= CHECKSUM_SMASK;
92 temp_scratch >>= CHECKSUM_SHIFT;
93
94 if (checksum + temp_scratch == 0xFFFF)
95 return 1;
96
97 dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__);
98 return 0;
99 }
100
101 static void save_platform_config_fields(struct hfi1_devdata *dd)
102 {
103 struct hfi1_pportdata *ppd = dd->pport;
104 u64 temp_scratch = 0, temp_dest = 0;
105
106 temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
107
108 temp_dest = temp_scratch &
109 (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
110 PORT0_PORT_TYPE_SMASK);
111 ppd->port_type = temp_dest >>
112 (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
113 PORT0_PORT_TYPE_SHIFT);
114
115 temp_dest = temp_scratch &
116 (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
117 PORT0_LOCAL_ATTEN_SMASK);
118 ppd->local_atten = temp_dest >>
119 (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
120 PORT0_LOCAL_ATTEN_SHIFT);
121
122 temp_dest = temp_scratch &
123 (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
124 PORT0_REMOTE_ATTEN_SMASK);
125 ppd->remote_atten = temp_dest >>
126 (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
127 PORT0_REMOTE_ATTEN_SHIFT);
128
129 temp_dest = temp_scratch &
130 (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
131 PORT0_DEFAULT_ATTEN_SMASK);
132 ppd->default_atten = temp_dest >>
133 (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
134 PORT0_DEFAULT_ATTEN_SHIFT);
135
136 temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
137 ASIC_CFG_SCRATCH_2);
138
139 ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
140 ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
141 ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;
142
143 ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
144 QSFP_MAX_POWER_SHIFT;
145
146 ppd->config_from_scratch = true;
147 }
148
149 void get_platform_config(struct hfi1_devdata *dd)
150 {
151 int ret = 0;
152 u8 *temp_platform_config = NULL;
153 u32 esize;
154 const struct firmware *platform_config_file = NULL;
155
156 if (is_integrated(dd)) {
157 if (validate_scratch_checksum(dd)) {
158 save_platform_config_fields(dd);
159 return;
160 }
161 } else {
162 ret = eprom_read_platform_config(dd,
163 (void **)&temp_platform_config,
164 &esize);
165 if (!ret) {
166 /* success */
167 dd->platform_config.data = temp_platform_config;
168 dd->platform_config.size = esize;
169 return;
170 }
171 }
172 dd_dev_err(dd,
173 "%s: Failed to get platform config, falling back to sub-optimal default file\n",
174 __func__);
175
176 ret = request_firmware(&platform_config_file,
177 DEFAULT_PLATFORM_CONFIG_NAME,
178 &dd->pcidev->dev);
179 if (ret) {
180 dd_dev_err(dd,
181 "%s: No default platform config file found\n",
182 __func__);
183 return;
184 }
185
186 /*
187 * Allocate separate memory block to store data and free firmware
188 * structure. This allows free_platform_config to treat EPROM and
189 * fallback configs in the same manner.
190 */
191 dd->platform_config.data = kmemdup(platform_config_file->data,
192 platform_config_file->size,
193 GFP_KERNEL);
194 dd->platform_config.size = platform_config_file->size;
195 release_firmware(platform_config_file);
196 }
197
198 void free_platform_config(struct hfi1_devdata *dd)
199 {
200 /* Release memory allocated for eprom or fallback file read. */
201 kfree(dd->platform_config.data);
202 }
203
204 void get_port_type(struct hfi1_pportdata *ppd)
205 {
206 int ret;
207 u32 temp;
208
209 ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
210 PORT_TABLE_PORT_TYPE, &temp,
211 4);
212 if (ret) {
213 ppd->port_type = PORT_TYPE_UNKNOWN;
214 return;
215 }
216 ppd->port_type = temp;
217 }
218
219 int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
220 {
221 u8 tx_ctrl_byte = on ? 0x0 : 0xF;
222 int ret = 0;
223
224 ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
225 &tx_ctrl_byte, 1);
226 /* we expected 1, so consider 0 an error */
227 if (ret == 0)
228 ret = -EIO;
229 else if (ret == 1)
230 ret = 0;
231 return ret;
232 }
233
234 static int qual_power(struct hfi1_pportdata *ppd)
235 {
236 u32 cable_power_class = 0, power_class_max = 0;
237 u8 *cache = ppd->qsfp_info.cache;
238 int ret = 0;
239
240 ret = get_platform_config_field(
241 ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
242 SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
243 if (ret)
244 return ret;
245
246 cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
247
248 if (cable_power_class > power_class_max)
249 ppd->offline_disabled_reason =
250 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
251
252 if (ppd->offline_disabled_reason ==
253 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
254 dd_dev_err(
255 ppd->dd,
256 "%s: Port disabled due to system power restrictions\n",
257 __func__);
258 ret = -EPERM;
259 }
260 return ret;
261 }
262
263 static int qual_bitrate(struct hfi1_pportdata *ppd)
264 {
265 u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
266 u8 *cache = ppd->qsfp_info.cache;
267
268 if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
269 cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
270 ppd->offline_disabled_reason =
271 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
272
273 if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
274 cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
275 ppd->offline_disabled_reason =
276 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
277
278 if (ppd->offline_disabled_reason ==
279 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
280 dd_dev_err(
281 ppd->dd,
282 "%s: Cable failed bitrate check, disabling port\n",
283 __func__);
284 return -EPERM;
285 }
286 return 0;
287 }
288
/*
 * Raise the QSFP module out of low-power mode when its power class
 * requires it.
 *
 * Class 1 cables need no action. For higher classes the power control
 * byte is rewritten: override bit set, low-power request bit cleared;
 * classes above 4 additionally need the high-power enable bit. Each
 * qsfp_write() must transfer exactly one byte or we report failure.
 *
 * Return: 0 on success, -EIO on a short write.
 */
static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
{
	u8 cable_power_class = 0, power_ctrl_byte = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > QSFP_POWER_CLASS_1) {
		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];

		/* set power override, clear the low-power request bit */
		power_ctrl_byte |= 1;
		power_ctrl_byte &= ~(0x2);

		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
				 QSFP_PWR_CTRL_BYTE_OFFS,
				 &power_ctrl_byte, 1);
		if (ret != 1)
			return -EIO;

		if (cable_power_class > QSFP_POWER_CLASS_4) {
			/* classes 5-7 also need the high-power enable bit */
			power_ctrl_byte |= (1 << 2);
			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
					 QSFP_PWR_CTRL_BYTE_OFFS,
					 &power_ctrl_byte, 1);
			if (ret != 1)
				return -EIO;
		}

		/* SFF 8679 rev 1.7 LPMode Deassert time */
		msleep(300);
	}
	return 0;
}
323
/*
 * Compute the RX CDR (clock/data recovery) enable bits — the low nibble
 * of *cdr_ctrl_byte — from cable capability and the platform RX preset
 * table. Only the byte is modified here; the caller writes it to the
 * module.
 */
static void apply_rx_cdr(struct hfi1_pportdata *ppd,
			 u32 rx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 rx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	/* nothing to do unless an RX CDR is present and bypass-capable */
	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
		return;

	/* RX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn RX CDR on */
		*cdr_ctrl_byte |= 0xF;
		return;
	}

	/* does the platform want the RX CDR preset applied at all? */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
		&rx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	rx_preset = (rx_preset | (rx_preset << 1) |
		     (rx_preset << 2) | (rx_preset << 3));

	if (rx_preset) {
		*cdr_ctrl_byte |= rx_preset;
	} else {
		/* preset is 0: clear the whole byte ... */
		*cdr_ctrl_byte &= rx_preset;
		/* Preserve current TX CDR status */
		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
	}
}
374
/*
 * Compute the TX CDR enable bits — the high nibble of *cdr_ctrl_byte —
 * from cable capability and the platform TX preset table. Mirrors
 * apply_rx_cdr() for the transmit direction; only the byte is modified,
 * the caller writes it to the module.
 */
static void apply_tx_cdr(struct hfi1_pportdata *ppd,
			 u32 tx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 tx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	/* nothing to do unless a TX CDR is present and bypass-capable */
	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
		return;

	/* TX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn TX CDR on */
		*cdr_ctrl_byte |= 0xF0;
		return;
	}

	/* does the platform want the TX CDR preset applied at all? */
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);

	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	tx_preset = (tx_preset | (tx_preset << 1) |
		     (tx_preset << 2) | (tx_preset << 3));

	if (tx_preset)
		*cdr_ctrl_byte |= (tx_preset << 4);
	else
		/* Preserve current/determined RX CDR status */
		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
}
424
425 static void apply_cdr_settings(
426 struct hfi1_pportdata *ppd, u32 rx_preset_index,
427 u32 tx_preset_index)
428 {
429 u8 *cache = ppd->qsfp_info.cache;
430 u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
431
432 apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);
433
434 apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);
435
436 qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
437 &cdr_ctrl_byte, 1);
438 }
439
/*
 * Disable the module's adaptive (automatic) TX equalization when the
 * capability byte says it is present, keeping the low nibble of the
 * control byte intact.
 *
 * NOTE(review): the cache is indexed with 128-byte pages
 * ((128 * 3) + 241) while qsfp_write() takes a flat address
 * ((256 * 3) + 241); this matches the addressing used throughout this
 * file — confirm against the qsfp cache layout.
 */
static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 tx_eq;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
		return;
	/* Disable adaptive TX EQ if present */
	tx_eq = cache[(128 * 3) + 241];
	tx_eq &= 0xF0;
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
}
452
/*
 * Apply the fixed (programmable) TX equalization from the platform TX
 * preset table, when the module supports host-controlled TX EQ.
 *
 * If the requested value exceeds the module's advertised maximum (upper
 * nibble of page-3 byte 224) the maximum is applied instead.
 */
static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u32 tx_preset;
	u8 tx_eq;

	/* no host-programmable TX EQ: nothing to do */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
		return;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
		&tx_preset, 4);
	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_EQ_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
		&tx_preset, 4);

	/* cache[608] == cache[(128 * 3) + 224]: module's max supported EQ */
	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX EQ %x unsupported\n",
			__func__, tx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying EQ %x\n",
			__func__, cache[608] & 0xF0);

		tx_preset = (cache[608] & 0xF0) >> 4;
	}

	/* same setting for all lanes, one nibble per lane pair */
	tx_eq = tx_preset | (tx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
}
496
/*
 * Apply RX emphasis from the platform RX preset table, when the module
 * supports host-controlled RX emphasis.
 *
 * If the requested value exceeds the module's advertised maximum (low
 * nibble of page-3 byte 224) the maximum is applied instead.
 */
static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
{
	u32 rx_preset;
	u8 rx_eq, *cache = ppd->qsfp_info.cache;

	/* no host-programmable RX emphasis: nothing to do */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
		return;
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_EMP_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
		&rx_preset, 4);

	/* cache[608] == cache[(128 * 3) + 224]: clamp to module's maximum */
	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: Requested RX EMP %x\n",
			__func__, rx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying supported EMP %x\n",
			__func__, cache[608] & 0xF);

		rx_preset = cache[608] & 0xF;
	}

	/* same setting for all lanes, one nibble per lane pair */
	rx_eq = rx_preset | (rx_preset << 4);

	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
}
540
541 static void apply_eq_settings(struct hfi1_pportdata *ppd,
542 u32 rx_preset_index, u32 tx_preset_index)
543 {
544 u8 *cache = ppd->qsfp_info.cache;
545
546 /* no point going on w/o a page 3 */
547 if (cache[2] & 4) {
548 dd_dev_info(ppd->dd,
549 "%s: Upper page 03 not present\n",
550 __func__);
551 return;
552 }
553
554 apply_tx_eq_auto(ppd);
555
556 apply_tx_eq_prog(ppd, tx_preset_index);
557
558 apply_rx_eq_emp(ppd, rx_preset_index);
559 }
560
/*
 * Apply an RX output amplitude setting to the QSFP module, choosing the
 * highest module-supported amplitude that does not exceed the platform's
 * requested preset.
 *
 * Requires upper page 03 and the module's RX amplitude capability bit.
 */
static void apply_rx_amplitude_settings(
	struct hfi1_pportdata *ppd, u32 rx_preset_index,
	u32 tx_preset_index)
{
	u32 rx_preset;
	u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}
	/*
	 * NOTE(review): this tests the module's amplitude capability bit,
	 * yet reuses the "RX_AMP_APPLY is set to disabled" message emitted
	 * for the platform config check below — possibly misleading log
	 * text; confirm intent.
	 */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}

	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
				  &rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}
	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP,
				  &rx_preset, 4);

	dd_dev_info(ppd->dd,
		    "%s: Requested RX AMP %x\n",
		    __func__,
		    rx_preset);

	/* pick the highest supported amplitude <= the requested preset */
	for (i = 0; i < 4; i++) {
		if (cache[(128 * 3) + 225] & (1 << i)) {
			preferred = i;
			if (preferred == rx_preset)
				break;
		}
	}

	/*
	 * Verify that preferred RX amplitude is not just a
	 * fall through of the default
	 */
	if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
		return;
	}

	dd_dev_info(ppd->dd,
		    "%s: Applying RX AMP %x\n", __func__, preferred);

	/* same setting for all lanes, one nibble per lane pair */
	rx_amp = preferred | (preferred << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
}
629
630 #define OPA_INVALID_INDEX 0xFFF
631
632 static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
633 u32 config_data, const char *message)
634 {
635 u8 i;
636 int ret = HCMD_SUCCESS;
637
638 for (i = 0; i < 4; i++) {
639 ret = load_8051_config(ppd->dd, field_id, i, config_data);
640 if (ret != HCMD_SUCCESS) {
641 dd_dev_err(
642 ppd->dd,
643 "%s: %s for lane %u failed\n",
644 message, __func__, i);
645 }
646 }
647 }
648
649 /*
650 * Return a special SerDes setting for low power AOC cables. The power class
651 * threshold and setting being used were all found by empirical testing.
652 *
653 * Summary of the logic:
654 *
655 * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
656 * return 0xe
657 * return 0; // leave at default
658 */
659 static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
660 {
661 u8 *cache = ppd->qsfp_info.cache;
662 int power_class;
663
664 /* QSFP only */
665 if (ppd->port_type != PORT_TYPE_QSFP)
666 return 0; /* leave at default */
667
668 /* active optical cables only */
669 switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
670 case 0x0 ... 0x9: /* fallthrough */
671 case 0xC: /* fallthrough */
672 case 0xE:
673 /* active AOC */
674 power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
675 if (power_class < QSFP_POWER_CLASS_4)
676 return 0xe;
677 }
678 return 0; /* leave at default */
679 }
680
/*
 * Push the tuning results to the 8051 firmware: the tuning method, the
 * per-lane channel loss (total attenuation), the cable's capability
 * byte, and — for limiting-active channels with a valid TX preset — the
 * TX equalization settings (precursor / attenuation / postcursor plus
 * the AOC low-power setting).
 */
static void apply_tunings(
	struct hfi1_pportdata *ppd, u32 tx_preset_index,
	u8 tuning_method, u32 total_atten, u8 limiting_active)
{
	int ret = 0;
	u32 config_data = 0, tx_preset = 0;
	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
	u8 *cache = ppd->qsfp_info.cache;

	/* Pass tuning method to 8051 */
	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			 &config_data);
	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			       config_data);
	if (ret != HCMD_SUCCESS)
		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
			   __func__);

	/* Set same channel loss for both TX and RX */
	config_data = 0 | (total_atten << 16) | (total_atten << 24);
	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
		       "Setting channel loss");

	/* Inform 8051 of cable capabilities */
	if (ppd->qsfp_info.cache_valid) {
		external_device_config =
			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
		/*
		 * NOTE(review): this read's ret is overwritten by the load
		 * below; a failed read silently programs a stale/zero base
		 * value — confirm this is acceptable best-effort behavior.
		 */
		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, &config_data);
		/* Clear, then set the external device config field */
		config_data &= ~(u32)0xFF;
		config_data |= external_device_config;
		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, config_data);
		if (ret != HCMD_SUCCESS)
			dd_dev_err(ppd->dd,
				   "%s: Failed set ext device config params\n",
				   __func__);
	}

	if (tx_preset_index == OPA_INVALID_INDEX) {
		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
			dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n",
				   __func__);
		return;
	}

	/* Following for limiting active channels only */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
	precur = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
	attn = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
	postcur = tx_preset;

	/*
	 * NOTES:
	 * o The aoc_low_power_setting is applied to all lanes even
	 *   though only lane 0's value is examined by the firmware.
	 * o A lingering low power setting after a cable swap does
	 *   not occur. On cable unplug the 8051 is reset and
	 *   restarted on cable insert. This resets all settings to
	 *   their default, erasing any previous low power setting.
	 */
	config_data = precur | (attn << 8) | (postcur << 16) |
		      (aoc_low_power_setting(ppd) << 24);

	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
		       "Applying TX settings");
}
764
/*
 * Must be holding the QSFP i2c resource.
 *
 * Tune an active QSFP cable: qualify power and bitrate, reset the module
 * if we touched it on a previous pass, raise it to high power, then look
 * up the TX/RX preset indices and local attenuation for the active link
 * speed before applying CDR, EQ and RX amplitude settings and
 * re-enabling TX.
 *
 * Outputs *ptr_tx_preset, *ptr_rx_preset and (on the 25G/12.5G paths)
 * *ptr_total_atten; presets are set to OPA_INVALID_INDEX on lookup
 * failure. Returns 0 on success or a negative errno.
 */
static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
{
	int ret;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	ppd->qsfp_info.limiting_active = 1;

	/* quiesce TX while we reprogram the module */
	ret = set_qsfp_tx(ppd, 0);
	if (ret)
		return ret;

	ret = qual_power(ppd);
	if (ret)
		return ret;

	ret = qual_bitrate(ppd);
	if (ret)
		return ret;

	/*
	 * We'll change the QSFP memory contents from here on out, thus we set a
	 * flag here to remind ourselves to reset the QSFP module. This prevents
	 * reuse of stale settings established in our previous pass through.
	 */
	if (ppd->qsfp_info.reset_needed) {
		ret = reset_qsfp(ppd);
		if (ret)
			return ret;
		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
	} else {
		ppd->qsfp_info.reset_needed = 1;
	}

	ret = set_qsfp_high_power(ppd);
	if (ret)
		return ret;

	/* choose the EQ or no-EQ TX preset based on the module's TX EQ bit */
	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	} else {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	}

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
	if (ret) {
		*ptr_rx_preset = OPA_INVALID_INDEX;
		return ret;
	}

	/* local attenuation for the highest speed both supported and enabled */
	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);

	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	/* re-enable TX now that the module is programmed */
	ret = set_qsfp_tx(ppd, 1);

	return ret;
}
854
/*
 * Dispatch cable tuning on the QSFP technology nibble:
 *  - 0xA-0xB: passive copper — total attenuation is platform local +
 *    cable + platform remote, with a configured fallback when the
 *    cable's own attenuation byte is implausible.
 *  - 0x0-0x9, 0xC, 0xE: active cable — full tune_active_qsfp() flow.
 *  - anything else: unknown/unsupported, warn and leave defaults.
 *
 * Fills *ptr_tx_preset / *ptr_rx_preset (active path only),
 * *ptr_tuning_method and *ptr_total_atten. Returns 0 or a negative
 * errno.
 */
static int tune_qsfp(struct hfi1_pportdata *ppd,
		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
{
	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	int ret = 0;
	u8 *cache = ppd->qsfp_info.cache;

	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0xA ... 0xB:
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G,
			&platform_atten, 4);
		if (ret)
			return ret;

		/* attenuation byte matching the active link speed */
		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
		else if ((lss & OPA_LINK_SPEED_12_5G) &&
			 (lse & OPA_LINK_SPEED_12_5G))
			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];

		/* Fallback to configured attenuation if cable memory is bad */
		if (cable_atten == 0 || cable_atten > 36) {
			ret = get_platform_config_field(
				ppd->dd,
				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
				&cable_atten, 4);
			if (ret)
				return ret;
		}

		ret = get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
		if (ret)
			return ret;

		*ptr_total_atten = platform_atten + cable_atten + remote_atten;

		*ptr_tuning_method = OPA_PASSIVE_TUNING;
		break;
	case 0x0 ... 0x9: /* fallthrough */
	case 0xC: /* fallthrough */
	case 0xE:
		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
				       ptr_total_atten);
		if (ret)
			return ret;

		*ptr_tuning_method = OPA_ACTIVE_TUNING;
		break;
	case 0xD: /* fallthrough */
	case 0xF:
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n",
			    __func__);
		break;
	}
	return ret;
}
920
/*
 * This function communicates its success or failure via ppd->driver_link_ready
 * Thus, it depends on its association with start_link(...) which checks
 * driver_link_ready before proceeding with the link negotiation and
 * initialization process.
 *
 * Flow: skip tuning entirely for loopback/simulation; otherwise tune by
 * port type (disconnected / fixed / variable / QSFP), then push the
 * results to the 8051 via apply_tunings() and mark the link ready.
 * Any failure path jumps to "bail" and leaves driver_link_ready at 0.
 */
void tune_serdes(struct hfi1_pportdata *ppd)
{
	int ret = 0;
	u32 total_atten = 0;
	u32 remote_atten = 0, platform_atten = 0;
	u32 rx_preset_index, tx_preset_index;
	u8 tuning_method = 0, limiting_active = 0;
	struct hfi1_devdata *dd = ppd->dd;

	rx_preset_index = OPA_INVALID_INDEX;
	tx_preset_index = OPA_INVALID_INDEX;

	/* the link defaults to enabled */
	ppd->link_enabled = 1;
	/* the driver link ready state defaults to not ready */
	ppd->driver_link_ready = 0;
	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);

	/* Skip the tuning for testing (loopback != none) and simulations */
	if (loopback != LOOPBACK_NONE ||
	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		ppd->driver_link_ready = 1;

		/* still refresh the QSFP cache if a module is present */
		if (qsfp_mod_present(ppd)) {
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}

			refresh_qsfp_cache(ppd, &ppd->qsfp_info);
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
		}

		return;
	}

	switch (ppd->port_type) {
	case PORT_TYPE_DISCONNECTED:
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
		dd_dev_warn(dd, "%s: Port disconnected, disabling port\n",
			    __func__);
		goto bail;
	case PORT_TYPE_FIXED:
		/* platform_atten, remote_atten pre-zeroed to catch error */
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);

		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);

		total_atten = platform_atten + remote_atten;

		tuning_method = OPA_PASSIVE_TUNING;
		break;
	case PORT_TYPE_VARIABLE:
		if (qsfp_mod_present(ppd)) {
			/*
			 * platform_atten, remote_atten pre-zeroed to
			 * catch error
			 */
			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_LOCAL_ATTEN_25G,
				&platform_atten, 4);

			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_REMOTE_ATTEN_25G,
				&remote_atten, 4);

			total_atten = platform_atten + remote_atten;

			tuning_method = OPA_PASSIVE_TUNING;
		} else {
			ppd->offline_disabled_reason =
				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
			goto bail;
		}
		break;
	case PORT_TYPE_QSFP:
		if (qsfp_mod_present(ppd)) {
			/* serialize i2c access before touching the module */
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}
			refresh_qsfp_cache(ppd, &ppd->qsfp_info);

			if (ppd->qsfp_info.cache_valid) {
				ret = tune_qsfp(ppd,
						&tx_preset_index,
						&rx_preset_index,
						&tuning_method,
						&total_atten);

				/*
				 * We may have modified the QSFP memory, so
				 * update the cache to reflect the changes
				 */
				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
				limiting_active =
					ppd->qsfp_info.limiting_active;
			} else {
				dd_dev_err(dd,
					   "%s: Reading QSFP memory failed\n",
					   __func__);
				ret = -EINVAL; /* a fail indication */
			}
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
			if (ret)
				goto bail;
		} else {
			ppd->offline_disabled_reason =
				HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
			goto bail;
		}
		break;
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__);
		ppd->port_type = PORT_TYPE_UNKNOWN;
		tuning_method = OPA_UNKNOWN_TUNING;
		total_atten = 0;
		limiting_active = 0;
		tx_preset_index = OPA_INVALID_INDEX;
		break;
	}

	/* only program tunings if the port is not already being disabled */
	if (ppd->offline_disabled_reason ==
	    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
		apply_tunings(ppd, tx_preset_index, tuning_method,
			      total_atten, limiting_active);

	if (!ret)
		ppd->driver_link_ready = 1;

	return;
bail:
	ppd->driver_link_ready = 0;
}