1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
5 #include "ifpga_feature_dev.h"
7 #include "opae_intel_max10.h"
9 #include "opae_at24_eeprom.h"
11 #define PWR_THRESHOLD_MAX 0x7F
13 int fme_get_prop(struct ifpga_fme_hw
*fme
, struct feature_prop
*prop
)
15 struct ifpga_feature
*feature
;
20 feature
= get_fme_feature_by_id(fme
, prop
->feature_id
);
22 if (feature
&& feature
->ops
&& feature
->ops
->get_prop
)
23 return feature
->ops
->get_prop(feature
, prop
);
28 int fme_set_prop(struct ifpga_fme_hw
*fme
, struct feature_prop
*prop
)
30 struct ifpga_feature
*feature
;
35 feature
= get_fme_feature_by_id(fme
, prop
->feature_id
);
37 if (feature
&& feature
->ops
&& feature
->ops
->set_prop
)
38 return feature
->ops
->set_prop(feature
, prop
);
43 int fme_set_irq(struct ifpga_fme_hw
*fme
, u32 feature_id
, void *irq_set
)
45 struct ifpga_feature
*feature
;
50 feature
= get_fme_feature_by_id(fme
, feature_id
);
52 if (feature
&& feature
->ops
&& feature
->ops
->set_irq
)
53 return feature
->ops
->set_irq(feature
, irq_set
);
/* fme private feature header */
59 static int fme_hdr_init(struct ifpga_feature
*feature
)
61 struct feature_fme_header
*fme_hdr
;
63 fme_hdr
= (struct feature_fme_header
*)feature
->addr
;
65 dev_info(NULL
, "FME HDR Init.\n");
66 dev_info(NULL
, "FME cap %llx.\n",
67 (unsigned long long)fme_hdr
->capability
.csr
);
72 static void fme_hdr_uinit(struct ifpga_feature
*feature
)
76 dev_info(NULL
, "FME HDR UInit.\n");
79 static int fme_hdr_get_revision(struct ifpga_fme_hw
*fme
, u64
*revision
)
81 struct feature_fme_header
*fme_hdr
82 = get_fme_feature_ioaddr_by_index(fme
, FME_FEATURE_ID_HEADER
);
83 struct feature_header header
;
85 header
.csr
= readq(&fme_hdr
->header
);
86 *revision
= header
.revision
;
91 static int fme_hdr_get_ports_num(struct ifpga_fme_hw
*fme
, u64
*ports_num
)
93 struct feature_fme_header
*fme_hdr
94 = get_fme_feature_ioaddr_by_index(fme
, FME_FEATURE_ID_HEADER
);
95 struct feature_fme_capability fme_capability
;
97 fme_capability
.csr
= readq(&fme_hdr
->capability
);
98 *ports_num
= fme_capability
.num_ports
;
103 static int fme_hdr_get_cache_size(struct ifpga_fme_hw
*fme
, u64
*cache_size
)
105 struct feature_fme_header
*fme_hdr
106 = get_fme_feature_ioaddr_by_index(fme
, FME_FEATURE_ID_HEADER
);
107 struct feature_fme_capability fme_capability
;
109 fme_capability
.csr
= readq(&fme_hdr
->capability
);
110 *cache_size
= fme_capability
.cache_size
;
115 static int fme_hdr_get_version(struct ifpga_fme_hw
*fme
, u64
*version
)
117 struct feature_fme_header
*fme_hdr
118 = get_fme_feature_ioaddr_by_index(fme
, FME_FEATURE_ID_HEADER
);
119 struct feature_fme_capability fme_capability
;
121 fme_capability
.csr
= readq(&fme_hdr
->capability
);
122 *version
= fme_capability
.fabric_verid
;
127 static int fme_hdr_get_socket_id(struct ifpga_fme_hw
*fme
, u64
*socket_id
)
129 struct feature_fme_header
*fme_hdr
130 = get_fme_feature_ioaddr_by_index(fme
, FME_FEATURE_ID_HEADER
);
131 struct feature_fme_capability fme_capability
;
133 fme_capability
.csr
= readq(&fme_hdr
->capability
);
134 *socket_id
= fme_capability
.socket_id
;
139 static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw
*fme
,
142 struct feature_fme_header
*fme_hdr
143 = get_fme_feature_ioaddr_by_index(fme
, FME_FEATURE_ID_HEADER
);
145 *bitstream_id
= readq(&fme_hdr
->bitstream_id
);
150 static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw
*fme
,
151 u64
*bitstream_metadata
)
153 struct feature_fme_header
*fme_hdr
154 = get_fme_feature_ioaddr_by_index(fme
, FME_FEATURE_ID_HEADER
);
156 *bitstream_metadata
= readq(&fme_hdr
->bitstream_md
);
162 fme_hdr_get_prop(struct ifpga_feature
*feature
, struct feature_prop
*prop
)
164 struct ifpga_fme_hw
*fme
= feature
->parent
;
166 switch (prop
->prop_id
) {
167 case FME_HDR_PROP_REVISION
:
168 return fme_hdr_get_revision(fme
, &prop
->data
);
169 case FME_HDR_PROP_PORTS_NUM
:
170 return fme_hdr_get_ports_num(fme
, &prop
->data
);
171 case FME_HDR_PROP_CACHE_SIZE
:
172 return fme_hdr_get_cache_size(fme
, &prop
->data
);
173 case FME_HDR_PROP_VERSION
:
174 return fme_hdr_get_version(fme
, &prop
->data
);
175 case FME_HDR_PROP_SOCKET_ID
:
176 return fme_hdr_get_socket_id(fme
, &prop
->data
);
177 case FME_HDR_PROP_BITSTREAM_ID
:
178 return fme_hdr_get_bitstream_id(fme
, &prop
->data
);
179 case FME_HDR_PROP_BITSTREAM_METADATA
:
180 return fme_hdr_get_bitstream_metadata(fme
, &prop
->data
);
186 struct ifpga_feature_ops fme_hdr_ops
= {
187 .init
= fme_hdr_init
,
188 .uinit
= fme_hdr_uinit
,
189 .get_prop
= fme_hdr_get_prop
,
192 /* thermal management */
193 static int fme_thermal_get_threshold1(struct ifpga_fme_hw
*fme
, u64
*thres1
)
195 struct feature_fme_thermal
*thermal
;
196 struct feature_fme_tmp_threshold temp_threshold
;
198 thermal
= get_fme_feature_ioaddr_by_index(fme
,
199 FME_FEATURE_ID_THERMAL_MGMT
);
201 temp_threshold
.csr
= readq(&thermal
->threshold
);
202 *thres1
= temp_threshold
.tmp_thshold1
;
207 static int fme_thermal_set_threshold1(struct ifpga_fme_hw
*fme
, u64 thres1
)
209 struct feature_fme_thermal
*thermal
;
210 struct feature_fme_header
*fme_hdr
;
211 struct feature_fme_tmp_threshold tmp_threshold
;
212 struct feature_fme_capability fme_capability
;
214 thermal
= get_fme_feature_ioaddr_by_index(fme
,
215 FME_FEATURE_ID_THERMAL_MGMT
);
216 fme_hdr
= get_fme_feature_ioaddr_by_index(fme
, FME_FEATURE_ID_HEADER
);
218 spinlock_lock(&fme
->lock
);
219 tmp_threshold
.csr
= readq(&thermal
->threshold
);
220 fme_capability
.csr
= readq(&fme_hdr
->capability
);
222 if (fme_capability
.lock_bit
== 1) {
223 spinlock_unlock(&fme
->lock
);
225 } else if (thres1
> 100) {
226 spinlock_unlock(&fme
->lock
);
228 } else if (thres1
== 0) {
229 tmp_threshold
.tmp_thshold1_enable
= 0;
230 tmp_threshold
.tmp_thshold1
= thres1
;
232 tmp_threshold
.tmp_thshold1_enable
= 1;
233 tmp_threshold
.tmp_thshold1
= thres1
;
236 writeq(tmp_threshold
.csr
, &thermal
->threshold
);
237 spinlock_unlock(&fme
->lock
);
242 static int fme_thermal_get_threshold2(struct ifpga_fme_hw
*fme
, u64
*thres2
)
244 struct feature_fme_thermal
*thermal
;
245 struct feature_fme_tmp_threshold temp_threshold
;
247 thermal
= get_fme_feature_ioaddr_by_index(fme
,
248 FME_FEATURE_ID_THERMAL_MGMT
);
250 temp_threshold
.csr
= readq(&thermal
->threshold
);
251 *thres2
= temp_threshold
.tmp_thshold2
;
256 static int fme_thermal_set_threshold2(struct ifpga_fme_hw
*fme
, u64 thres2
)
258 struct feature_fme_thermal
*thermal
;
259 struct feature_fme_header
*fme_hdr
;
260 struct feature_fme_tmp_threshold tmp_threshold
;
261 struct feature_fme_capability fme_capability
;
263 thermal
= get_fme_feature_ioaddr_by_index(fme
,
264 FME_FEATURE_ID_THERMAL_MGMT
);
265 fme_hdr
= get_fme_feature_ioaddr_by_index(fme
, FME_FEATURE_ID_HEADER
);
267 spinlock_lock(&fme
->lock
);
268 tmp_threshold
.csr
= readq(&thermal
->threshold
);
269 fme_capability
.csr
= readq(&fme_hdr
->capability
);
271 if (fme_capability
.lock_bit
== 1) {
272 spinlock_unlock(&fme
->lock
);
274 } else if (thres2
> 100) {
275 spinlock_unlock(&fme
->lock
);
277 } else if (thres2
== 0) {
278 tmp_threshold
.tmp_thshold2_enable
= 0;
279 tmp_threshold
.tmp_thshold2
= thres2
;
281 tmp_threshold
.tmp_thshold2_enable
= 1;
282 tmp_threshold
.tmp_thshold2
= thres2
;
285 writeq(tmp_threshold
.csr
, &thermal
->threshold
);
286 spinlock_unlock(&fme
->lock
);
291 static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw
*fme
,
294 struct feature_fme_thermal
*thermal
;
295 struct feature_fme_tmp_threshold temp_threshold
;
297 thermal
= get_fme_feature_ioaddr_by_index(fme
,
298 FME_FEATURE_ID_THERMAL_MGMT
);
300 temp_threshold
.csr
= readq(&thermal
->threshold
);
301 *thres_trip
= temp_threshold
.therm_trip_thshold
;
306 static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw
*fme
,
309 struct feature_fme_thermal
*thermal
;
310 struct feature_fme_tmp_threshold temp_threshold
;
312 thermal
= get_fme_feature_ioaddr_by_index(fme
,
313 FME_FEATURE_ID_THERMAL_MGMT
);
315 temp_threshold
.csr
= readq(&thermal
->threshold
);
316 *thres1_reached
= temp_threshold
.thshold1_status
;
321 static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw
*fme
,
324 struct feature_fme_thermal
*thermal
;
325 struct feature_fme_tmp_threshold temp_threshold
;
327 thermal
= get_fme_feature_ioaddr_by_index(fme
,
328 FME_FEATURE_ID_THERMAL_MGMT
);
330 temp_threshold
.csr
= readq(&thermal
->threshold
);
331 *thres1_reached
= temp_threshold
.thshold2_status
;
336 static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw
*fme
,
339 struct feature_fme_thermal
*thermal
;
340 struct feature_fme_tmp_threshold temp_threshold
;
342 thermal
= get_fme_feature_ioaddr_by_index(fme
,
343 FME_FEATURE_ID_THERMAL_MGMT
);
345 temp_threshold
.csr
= readq(&thermal
->threshold
);
346 *thres1_policy
= temp_threshold
.thshold_policy
;
351 static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw
*fme
,
354 struct feature_fme_thermal
*thermal
;
355 struct feature_fme_tmp_threshold tmp_threshold
;
357 thermal
= get_fme_feature_ioaddr_by_index(fme
,
358 FME_FEATURE_ID_THERMAL_MGMT
);
360 spinlock_lock(&fme
->lock
);
361 tmp_threshold
.csr
= readq(&thermal
->threshold
);
363 if (thres1_policy
== 0) {
364 tmp_threshold
.thshold_policy
= 0;
365 } else if (thres1_policy
== 1) {
366 tmp_threshold
.thshold_policy
= 1;
368 spinlock_unlock(&fme
->lock
);
372 writeq(tmp_threshold
.csr
, &thermal
->threshold
);
373 spinlock_unlock(&fme
->lock
);
378 static int fme_thermal_get_temperature(struct ifpga_fme_hw
*fme
, u64
*temp
)
380 struct feature_fme_thermal
*thermal
;
381 struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1
;
383 thermal
= get_fme_feature_ioaddr_by_index(fme
,
384 FME_FEATURE_ID_THERMAL_MGMT
);
386 temp_rdsensor_fmt1
.csr
= readq(&thermal
->rdsensor_fm1
);
387 *temp
= temp_rdsensor_fmt1
.fpga_temp
;
392 static int fme_thermal_get_revision(struct ifpga_fme_hw
*fme
, u64
*revision
)
394 struct feature_fme_thermal
*fme_thermal
395 = get_fme_feature_ioaddr_by_index(fme
,
396 FME_FEATURE_ID_THERMAL_MGMT
);
397 struct feature_header header
;
399 header
.csr
= readq(&fme_thermal
->header
);
400 *revision
= header
.revision
;
405 #define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
407 static int fme_thermal_mgmt_init(struct ifpga_feature
*feature
)
409 struct feature_fme_thermal
*fme_thermal
;
410 struct feature_fme_tmp_threshold_cap thermal_cap
;
414 dev_info(NULL
, "FME thermal mgmt Init.\n");
416 fme_thermal
= (struct feature_fme_thermal
*)feature
->addr
;
417 thermal_cap
.csr
= readq(&fme_thermal
->threshold_cap
);
419 dev_info(NULL
, "FME thermal cap %llx.\n",
420 (unsigned long long)fme_thermal
->threshold_cap
.csr
);
422 if (thermal_cap
.tmp_thshold_disabled
)
423 feature
->cap
|= FME_THERMAL_CAP_NO_TMP_THRESHOLD
;
428 static void fme_thermal_mgmt_uinit(struct ifpga_feature
*feature
)
432 dev_info(NULL
, "FME thermal mgmt UInit.\n");
436 fme_thermal_set_prop(struct ifpga_feature
*feature
, struct feature_prop
*prop
)
438 struct ifpga_fme_hw
*fme
= feature
->parent
;
440 if (feature
->cap
& FME_THERMAL_CAP_NO_TMP_THRESHOLD
)
443 switch (prop
->prop_id
) {
444 case FME_THERMAL_PROP_THRESHOLD1
:
445 return fme_thermal_set_threshold1(fme
, prop
->data
);
446 case FME_THERMAL_PROP_THRESHOLD2
:
447 return fme_thermal_set_threshold2(fme
, prop
->data
);
448 case FME_THERMAL_PROP_THRESHOLD1_POLICY
:
449 return fme_thermal_set_threshold1_policy(fme
, prop
->data
);
456 fme_thermal_get_prop(struct ifpga_feature
*feature
, struct feature_prop
*prop
)
458 struct ifpga_fme_hw
*fme
= feature
->parent
;
460 if (feature
->cap
& FME_THERMAL_CAP_NO_TMP_THRESHOLD
&&
461 prop
->prop_id
!= FME_THERMAL_PROP_TEMPERATURE
&&
462 prop
->prop_id
!= FME_THERMAL_PROP_REVISION
)
465 switch (prop
->prop_id
) {
466 case FME_THERMAL_PROP_THRESHOLD1
:
467 return fme_thermal_get_threshold1(fme
, &prop
->data
);
468 case FME_THERMAL_PROP_THRESHOLD2
:
469 return fme_thermal_get_threshold2(fme
, &prop
->data
);
470 case FME_THERMAL_PROP_THRESHOLD_TRIP
:
471 return fme_thermal_get_threshold_trip(fme
, &prop
->data
);
472 case FME_THERMAL_PROP_THRESHOLD1_REACHED
:
473 return fme_thermal_get_threshold1_reached(fme
, &prop
->data
);
474 case FME_THERMAL_PROP_THRESHOLD2_REACHED
:
475 return fme_thermal_get_threshold2_reached(fme
, &prop
->data
);
476 case FME_THERMAL_PROP_THRESHOLD1_POLICY
:
477 return fme_thermal_get_threshold1_policy(fme
, &prop
->data
);
478 case FME_THERMAL_PROP_TEMPERATURE
:
479 return fme_thermal_get_temperature(fme
, &prop
->data
);
480 case FME_THERMAL_PROP_REVISION
:
481 return fme_thermal_get_revision(fme
, &prop
->data
);
487 struct ifpga_feature_ops fme_thermal_mgmt_ops
= {
488 .init
= fme_thermal_mgmt_init
,
489 .uinit
= fme_thermal_mgmt_uinit
,
490 .get_prop
= fme_thermal_get_prop
,
491 .set_prop
= fme_thermal_set_prop
,
494 static int fme_pwr_get_consumed(struct ifpga_fme_hw
*fme
, u64
*consumed
)
496 struct feature_fme_power
*fme_power
497 = get_fme_feature_ioaddr_by_index(fme
,
498 FME_FEATURE_ID_POWER_MGMT
);
499 struct feature_fme_pm_status pm_status
;
501 pm_status
.csr
= readq(&fme_power
->status
);
503 *consumed
= pm_status
.pwr_consumed
;
508 static int fme_pwr_get_threshold1(struct ifpga_fme_hw
*fme
, u64
*threshold
)
510 struct feature_fme_power
*fme_power
511 = get_fme_feature_ioaddr_by_index(fme
,
512 FME_FEATURE_ID_POWER_MGMT
);
513 struct feature_fme_pm_ap_threshold pm_ap_threshold
;
515 pm_ap_threshold
.csr
= readq(&fme_power
->threshold
);
517 *threshold
= pm_ap_threshold
.threshold1
;
522 static int fme_pwr_set_threshold1(struct ifpga_fme_hw
*fme
, u64 threshold
)
524 struct feature_fme_power
*fme_power
525 = get_fme_feature_ioaddr_by_index(fme
,
526 FME_FEATURE_ID_POWER_MGMT
);
527 struct feature_fme_pm_ap_threshold pm_ap_threshold
;
529 spinlock_lock(&fme
->lock
);
530 pm_ap_threshold
.csr
= readq(&fme_power
->threshold
);
532 if (threshold
<= PWR_THRESHOLD_MAX
) {
533 pm_ap_threshold
.threshold1
= threshold
;
535 spinlock_unlock(&fme
->lock
);
539 writeq(pm_ap_threshold
.csr
, &fme_power
->threshold
);
540 spinlock_unlock(&fme
->lock
);
545 static int fme_pwr_get_threshold2(struct ifpga_fme_hw
*fme
, u64
*threshold
)
547 struct feature_fme_power
*fme_power
548 = get_fme_feature_ioaddr_by_index(fme
,
549 FME_FEATURE_ID_POWER_MGMT
);
550 struct feature_fme_pm_ap_threshold pm_ap_threshold
;
552 pm_ap_threshold
.csr
= readq(&fme_power
->threshold
);
554 *threshold
= pm_ap_threshold
.threshold2
;
559 static int fme_pwr_set_threshold2(struct ifpga_fme_hw
*fme
, u64 threshold
)
561 struct feature_fme_power
*fme_power
562 = get_fme_feature_ioaddr_by_index(fme
,
563 FME_FEATURE_ID_POWER_MGMT
);
564 struct feature_fme_pm_ap_threshold pm_ap_threshold
;
566 spinlock_lock(&fme
->lock
);
567 pm_ap_threshold
.csr
= readq(&fme_power
->threshold
);
569 if (threshold
<= PWR_THRESHOLD_MAX
) {
570 pm_ap_threshold
.threshold2
= threshold
;
572 spinlock_unlock(&fme
->lock
);
576 writeq(pm_ap_threshold
.csr
, &fme_power
->threshold
);
577 spinlock_unlock(&fme
->lock
);
582 static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw
*fme
,
583 u64
*threshold_status
)
585 struct feature_fme_power
*fme_power
586 = get_fme_feature_ioaddr_by_index(fme
,
587 FME_FEATURE_ID_POWER_MGMT
);
588 struct feature_fme_pm_ap_threshold pm_ap_threshold
;
590 pm_ap_threshold
.csr
= readq(&fme_power
->threshold
);
592 *threshold_status
= pm_ap_threshold
.threshold1_status
;
597 static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw
*fme
,
598 u64
*threshold_status
)
600 struct feature_fme_power
*fme_power
601 = get_fme_feature_ioaddr_by_index(fme
,
602 FME_FEATURE_ID_POWER_MGMT
);
603 struct feature_fme_pm_ap_threshold pm_ap_threshold
;
605 pm_ap_threshold
.csr
= readq(&fme_power
->threshold
);
607 *threshold_status
= pm_ap_threshold
.threshold2_status
;
612 static int fme_pwr_get_rtl(struct ifpga_fme_hw
*fme
, u64
*rtl
)
614 struct feature_fme_power
*fme_power
615 = get_fme_feature_ioaddr_by_index(fme
,
616 FME_FEATURE_ID_POWER_MGMT
);
617 struct feature_fme_pm_status pm_status
;
619 pm_status
.csr
= readq(&fme_power
->status
);
621 *rtl
= pm_status
.fpga_latency_report
;
626 static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw
*fme
, u64
*limit
)
628 struct feature_fme_power
*fme_power
629 = get_fme_feature_ioaddr_by_index(fme
,
630 FME_FEATURE_ID_POWER_MGMT
);
631 struct feature_fme_pm_xeon_limit xeon_limit
;
633 xeon_limit
.csr
= readq(&fme_power
->xeon_limit
);
635 if (!xeon_limit
.enable
)
636 xeon_limit
.pwr_limit
= 0;
638 *limit
= xeon_limit
.pwr_limit
;
643 static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw
*fme
, u64
*limit
)
645 struct feature_fme_power
*fme_power
646 = get_fme_feature_ioaddr_by_index(fme
,
647 FME_FEATURE_ID_POWER_MGMT
);
648 struct feature_fme_pm_fpga_limit fpga_limit
;
650 fpga_limit
.csr
= readq(&fme_power
->fpga_limit
);
652 if (!fpga_limit
.enable
)
653 fpga_limit
.pwr_limit
= 0;
655 *limit
= fpga_limit
.pwr_limit
;
660 static int fme_pwr_get_revision(struct ifpga_fme_hw
*fme
, u64
*revision
)
662 struct feature_fme_power
*fme_power
663 = get_fme_feature_ioaddr_by_index(fme
,
664 FME_FEATURE_ID_POWER_MGMT
);
665 struct feature_header header
;
667 header
.csr
= readq(&fme_power
->header
);
668 *revision
= header
.revision
;
673 static int fme_power_mgmt_init(struct ifpga_feature
*feature
)
677 dev_info(NULL
, "FME power mgmt Init.\n");
682 static void fme_power_mgmt_uinit(struct ifpga_feature
*feature
)
686 dev_info(NULL
, "FME power mgmt UInit.\n");
689 static int fme_power_mgmt_get_prop(struct ifpga_feature
*feature
,
690 struct feature_prop
*prop
)
692 struct ifpga_fme_hw
*fme
= feature
->parent
;
694 switch (prop
->prop_id
) {
695 case FME_PWR_PROP_CONSUMED
:
696 return fme_pwr_get_consumed(fme
, &prop
->data
);
697 case FME_PWR_PROP_THRESHOLD1
:
698 return fme_pwr_get_threshold1(fme
, &prop
->data
);
699 case FME_PWR_PROP_THRESHOLD2
:
700 return fme_pwr_get_threshold2(fme
, &prop
->data
);
701 case FME_PWR_PROP_THRESHOLD1_STATUS
:
702 return fme_pwr_get_threshold1_status(fme
, &prop
->data
);
703 case FME_PWR_PROP_THRESHOLD2_STATUS
:
704 return fme_pwr_get_threshold2_status(fme
, &prop
->data
);
705 case FME_PWR_PROP_RTL
:
706 return fme_pwr_get_rtl(fme
, &prop
->data
);
707 case FME_PWR_PROP_XEON_LIMIT
:
708 return fme_pwr_get_xeon_limit(fme
, &prop
->data
);
709 case FME_PWR_PROP_FPGA_LIMIT
:
710 return fme_pwr_get_fpga_limit(fme
, &prop
->data
);
711 case FME_PWR_PROP_REVISION
:
712 return fme_pwr_get_revision(fme
, &prop
->data
);
718 static int fme_power_mgmt_set_prop(struct ifpga_feature
*feature
,
719 struct feature_prop
*prop
)
721 struct ifpga_fme_hw
*fme
= feature
->parent
;
723 switch (prop
->prop_id
) {
724 case FME_PWR_PROP_THRESHOLD1
:
725 return fme_pwr_set_threshold1(fme
, prop
->data
);
726 case FME_PWR_PROP_THRESHOLD2
:
727 return fme_pwr_set_threshold2(fme
, prop
->data
);
733 struct ifpga_feature_ops fme_power_mgmt_ops
= {
734 .init
= fme_power_mgmt_init
,
735 .uinit
= fme_power_mgmt_uinit
,
736 .get_prop
= fme_power_mgmt_get_prop
,
737 .set_prop
= fme_power_mgmt_set_prop
,
740 static int fme_hssi_eth_init(struct ifpga_feature
*feature
)
746 static void fme_hssi_eth_uinit(struct ifpga_feature
*feature
)
751 struct ifpga_feature_ops fme_hssi_eth_ops
= {
752 .init
= fme_hssi_eth_init
,
753 .uinit
= fme_hssi_eth_uinit
,
756 static int fme_emif_init(struct ifpga_feature
*feature
)
762 static void fme_emif_uinit(struct ifpga_feature
*feature
)
767 struct ifpga_feature_ops fme_emif_ops
= {
768 .init
= fme_emif_init
,
769 .uinit
= fme_emif_uinit
,
772 static const char *board_type_to_string(u32 type
)
782 return "VC_4x25G+2x25G";
790 static int board_type_to_info(u32 type
,
791 struct ifpga_fme_board_info
*info
)
795 info
->nums_of_retimer
= 2;
796 info
->ports_per_retimer
= 4;
797 info
->nums_of_fvl
= 2;
798 info
->ports_per_fvl
= 4;
801 info
->nums_of_retimer
= 1;
802 info
->ports_per_retimer
= 4;
803 info
->nums_of_fvl
= 2;
804 info
->ports_per_fvl
= 2;
807 info
->nums_of_retimer
= 2;
808 info
->ports_per_retimer
= 1;
809 info
->nums_of_fvl
= 1;
810 info
->ports_per_fvl
= 2;
813 info
->nums_of_retimer
= 2;
814 info
->ports_per_retimer
= 2;
815 info
->nums_of_fvl
= 2;
816 info
->ports_per_fvl
= 2;
825 static int fme_get_board_interface(struct ifpga_fme_hw
*fme
)
827 struct fme_bitstream_id id
;
829 if (fme_hdr_get_bitstream_id(fme
, &id
.id
))
832 fme
->board_info
.type
= id
.interface
;
833 fme
->board_info
.build_hash
= id
.hash
;
834 fme
->board_info
.debug_version
= id
.debug
;
835 fme
->board_info
.major_version
= id
.major
;
836 fme
->board_info
.minor_version
= id
.minor
;
838 dev_info(fme
, "board type: %s major_version:%u minor_version:%u build_hash:%u\n",
839 board_type_to_string(fme
->board_info
.type
),
840 fme
->board_info
.major_version
,
841 fme
->board_info
.minor_version
,
842 fme
->board_info
.build_hash
);
844 if (board_type_to_info(fme
->board_info
.type
, &fme
->board_info
))
847 dev_info(fme
, "get board info: nums_retimers %d ports_per_retimer %d nums_fvl %d ports_per_fvl %d\n",
848 fme
->board_info
.nums_of_retimer
,
849 fme
->board_info
.ports_per_retimer
,
850 fme
->board_info
.nums_of_fvl
,
851 fme
->board_info
.ports_per_fvl
);
856 static int spi_self_checking(void)
861 ret
= max10_reg_read(0x30043c, &val
);
865 if (val
!= 0x87654321) {
866 dev_err(NULL
, "Read MAX10 test register fail: 0x%x\n", val
);
870 dev_info(NULL
, "Read MAX10 test register success, SPI self-test done\n");
875 static int fme_spi_init(struct ifpga_feature
*feature
)
877 struct ifpga_fme_hw
*fme
= (struct ifpga_fme_hw
*)feature
->parent
;
878 struct altera_spi_device
*spi_master
;
879 struct intel_max10_device
*max10
;
882 dev_info(fme
, "FME SPI Master (Max10) Init.\n");
883 dev_debug(fme
, "FME SPI base addr %p.\n",
885 dev_debug(fme
, "spi param=0x%llx\n",
886 (unsigned long long)opae_readq(feature
->addr
+ 0x8));
888 spi_master
= altera_spi_alloc(feature
->addr
, TYPE_SPI
);
892 altera_spi_init(spi_master
);
894 max10
= intel_max10_device_probe(spi_master
, 0);
897 dev_err(fme
, "max10 init fail\n");
901 fme
->max10_dev
= max10
;
904 if (spi_self_checking()) {
912 intel_max10_device_remove(fme
->max10_dev
);
914 altera_spi_release(spi_master
);
918 static void fme_spi_uinit(struct ifpga_feature
*feature
)
920 struct ifpga_fme_hw
*fme
= (struct ifpga_fme_hw
*)feature
->parent
;
923 intel_max10_device_remove(fme
->max10_dev
);
926 struct ifpga_feature_ops fme_spi_master_ops
= {
927 .init
= fme_spi_init
,
928 .uinit
= fme_spi_uinit
,
931 static int nios_spi_wait_init_done(struct altera_spi_device
*dev
)
934 unsigned long timeout
= msecs_to_timer_cycles(10000);
938 if (spi_reg_read(dev
, NIOS_SPI_INIT_DONE
, &val
))
943 ticks
= rte_get_timer_cycles();
944 if (time_after(ticks
, timeout
))
952 static int nios_spi_check_error(struct altera_spi_device
*dev
)
956 if (spi_reg_read(dev
, NIOS_SPI_INIT_STS0
, &value
))
959 dev_debug(dev
, "SPI init status0 0x%x\n", value
);
961 /* Error code: 0xFFF0 to 0xFFFC */
962 if (value
>= 0xFFF0 && value
<= 0xFFFC)
966 if (spi_reg_read(dev
, NIOS_SPI_INIT_STS1
, &value
))
969 dev_debug(dev
, "SPI init status1 0x%x\n", value
);
971 /* Error code: 0xFFF0 to 0xFFFC */
972 if (value
>= 0xFFF0 && value
<= 0xFFFC)
978 static int fme_nios_spi_init(struct ifpga_feature
*feature
)
980 struct ifpga_fme_hw
*fme
= (struct ifpga_fme_hw
*)feature
->parent
;
981 struct altera_spi_device
*spi_master
;
982 struct intel_max10_device
*max10
;
985 dev_info(fme
, "FME SPI Master (NIOS) Init.\n");
986 dev_debug(fme
, "FME SPI base addr %p.\n",
988 dev_debug(fme
, "spi param=0x%llx\n",
989 (unsigned long long)opae_readq(feature
->addr
+ 0x8));
991 spi_master
= altera_spi_alloc(feature
->addr
, TYPE_NIOS_SPI
);
996 * 1. wait A10 NIOS initial finished and
997 * release the SPI master to Host
999 ret
= nios_spi_wait_init_done(spi_master
);
1001 dev_err(fme
, "FME NIOS_SPI init fail\n");
1005 dev_info(fme
, "FME NIOS_SPI initial done\n");
1007 /* 2. check if error occur? */
1008 if (nios_spi_check_error(spi_master
))
1009 dev_info(fme
, "NIOS_SPI INIT done, but found some error\n");
1011 /* 3. init the spi master*/
1012 altera_spi_init(spi_master
);
1014 /* init the max10 device */
1015 max10
= intel_max10_device_probe(spi_master
, 0);
1018 dev_err(fme
, "max10 init fail\n");
1022 fme_get_board_interface(fme
);
1024 fme
->max10_dev
= max10
;
1027 if (spi_self_checking())
1033 intel_max10_device_remove(fme
->max10_dev
);
1035 altera_spi_release(spi_master
);
1039 static void fme_nios_spi_uinit(struct ifpga_feature
*feature
)
1041 struct ifpga_fme_hw
*fme
= (struct ifpga_fme_hw
*)feature
->parent
;
1044 intel_max10_device_remove(fme
->max10_dev
);
1047 struct ifpga_feature_ops fme_nios_spi_master_ops
= {
1048 .init
= fme_nios_spi_init
,
1049 .uinit
= fme_nios_spi_uinit
,
1052 static int i2c_mac_rom_test(struct altera_i2c_dev
*dev
)
1056 char read_buf
[20] = {0,};
1057 const char *string
= "1a2b3c4d5e";
1059 opae_memcpy(buf
, string
, strlen(string
));
1061 ret
= at24_eeprom_write(dev
, AT24512_SLAVE_ADDR
, 0,
1062 (u8
*)buf
, strlen(string
));
1064 dev_err(NULL
, "write i2c error:%d\n", ret
);
1068 ret
= at24_eeprom_read(dev
, AT24512_SLAVE_ADDR
, 0,
1069 (u8
*)read_buf
, strlen(string
));
1071 dev_err(NULL
, "read i2c error:%d\n", ret
);
1075 if (memcmp(buf
, read_buf
, strlen(string
))) {
1076 dev_err(NULL
, "%s test fail!\n", __func__
);
1080 dev_info(NULL
, "%s test successful\n", __func__
);
1085 static int fme_i2c_init(struct ifpga_feature
*feature
)
1087 struct feature_fme_i2c
*i2c
;
1088 struct ifpga_fme_hw
*fme
= (struct ifpga_fme_hw
*)feature
->parent
;
1090 i2c
= (struct feature_fme_i2c
*)feature
->addr
;
1092 dev_info(NULL
, "FME I2C Master Init.\n");
1094 fme
->i2c_master
= altera_i2c_probe(i2c
);
1095 if (!fme
->i2c_master
)
1098 /* MAC ROM self test */
1099 i2c_mac_rom_test(fme
->i2c_master
);
1104 static void fme_i2c_uninit(struct ifpga_feature
*feature
)
1106 struct ifpga_fme_hw
*fme
= (struct ifpga_fme_hw
*)feature
->parent
;
1108 altera_i2c_remove(fme
->i2c_master
);
1111 struct ifpga_feature_ops fme_i2c_master_ops
= {
1112 .init
= fme_i2c_init
,
1113 .uinit
= fme_i2c_uninit
,
1116 static int fme_eth_group_init(struct ifpga_feature
*feature
)
1118 struct ifpga_fme_hw
*fme
= (struct ifpga_fme_hw
*)feature
->parent
;
1119 struct eth_group_device
*dev
;
1121 dev
= (struct eth_group_device
*)eth_group_probe(feature
->addr
);
1125 fme
->eth_dev
[dev
->group_id
] = dev
;
1127 fme
->eth_group_region
[dev
->group_id
].addr
=
1129 fme
->eth_group_region
[dev
->group_id
].phys_addr
=
1131 fme
->eth_group_region
[dev
->group_id
].len
=
1134 fme
->nums_eth_dev
++;
1136 dev_info(NULL
, "FME PHY Group %d Init.\n", dev
->group_id
);
1137 dev_info(NULL
, "found %d eth group, addr %p phys_addr 0x%llx len %u\n",
1138 dev
->group_id
, feature
->addr
,
1139 (unsigned long long)feature
->phys_addr
,
1145 static void fme_eth_group_uinit(struct ifpga_feature
*feature
)
1150 struct ifpga_feature_ops fme_eth_group_ops
= {
1151 .init
= fme_eth_group_init
,
1152 .uinit
= fme_eth_group_uinit
,
1155 int fme_mgr_read_mac_rom(struct ifpga_fme_hw
*fme
, int offset
,
1156 void *buf
, int size
)
1158 struct altera_i2c_dev
*dev
;
1160 dev
= fme
->i2c_master
;
1164 return at24_eeprom_read(dev
, AT24512_SLAVE_ADDR
, offset
, buf
, size
);
1167 int fme_mgr_write_mac_rom(struct ifpga_fme_hw
*fme
, int offset
,
1168 void *buf
, int size
)
1170 struct altera_i2c_dev
*dev
;
1172 dev
= fme
->i2c_master
;
1176 return at24_eeprom_write(dev
, AT24512_SLAVE_ADDR
, offset
, buf
, size
);
1179 static struct eth_group_device
*get_eth_group_dev(struct ifpga_fme_hw
*fme
,
1182 struct eth_group_device
*dev
;
1184 if (group_id
> (MAX_ETH_GROUP_DEVICES
- 1))
1187 dev
= (struct eth_group_device
*)fme
->eth_dev
[group_id
];
1191 if (dev
->status
!= ETH_GROUP_DEV_ATTACHED
)
1197 int fme_mgr_get_eth_group_nums(struct ifpga_fme_hw
*fme
)
1199 return fme
->nums_eth_dev
;
1202 int fme_mgr_get_eth_group_info(struct ifpga_fme_hw
*fme
,
1203 u8 group_id
, struct opae_eth_group_info
*info
)
1205 struct eth_group_device
*dev
;
1207 dev
= get_eth_group_dev(fme
, group_id
);
1211 info
->group_id
= group_id
;
1212 info
->speed
= dev
->speed
;
1213 info
->nums_of_mac
= dev
->mac_num
;
1214 info
->nums_of_phy
= dev
->phy_num
;
1219 int fme_mgr_eth_group_read_reg(struct ifpga_fme_hw
*fme
, u8 group_id
,
1220 u8 type
, u8 index
, u16 addr
, u32
*data
)
1222 struct eth_group_device
*dev
;
1224 dev
= get_eth_group_dev(fme
, group_id
);
1228 return eth_group_read_reg(dev
, type
, index
, addr
, data
);
1231 int fme_mgr_eth_group_write_reg(struct ifpga_fme_hw
*fme
, u8 group_id
,
1232 u8 type
, u8 index
, u16 addr
, u32 data
)
1234 struct eth_group_device
*dev
;
1236 dev
= get_eth_group_dev(fme
, group_id
);
1240 return eth_group_write_reg(dev
, type
, index
, addr
, data
);
1243 static int fme_get_eth_group_speed(struct ifpga_fme_hw
*fme
,
1246 struct eth_group_device
*dev
;
1248 dev
= get_eth_group_dev(fme
, group_id
);
1255 int fme_mgr_get_retimer_info(struct ifpga_fme_hw
*fme
,
1256 struct opae_retimer_info
*info
)
1258 struct intel_max10_device
*dev
;
1260 dev
= (struct intel_max10_device
*)fme
->max10_dev
;
1264 info
->nums_retimer
= fme
->board_info
.nums_of_retimer
;
1265 info
->ports_per_retimer
= fme
->board_info
.ports_per_retimer
;
1266 info
->nums_fvl
= fme
->board_info
.nums_of_fvl
;
1267 info
->ports_per_fvl
= fme
->board_info
.ports_per_fvl
;
1269 /* The speed of PKVL is identical the eth group's speed */
1270 info
->support_speed
= fme_get_eth_group_speed(fme
,
1271 LINE_SIDE_GROUP_ID
);
1276 int fme_mgr_get_retimer_status(struct ifpga_fme_hw
*fme
,
1277 struct opae_retimer_status
*status
)
1279 struct intel_max10_device
*dev
;
1282 dev
= (struct intel_max10_device
*)fme
->max10_dev
;
1286 if (max10_reg_read(PKVL_LINK_STATUS
, &val
)) {
1287 dev_err(dev
, "%s: read pkvl status fail\n", __func__
);
/* The speed of PKVL is identical to the eth group's speed */
1292 status
->speed
= fme_get_eth_group_speed(fme
,
1293 LINE_SIDE_GROUP_ID
);
1295 status
->line_link_bitmap
= val
;
1297 dev_debug(dev
, "get retimer status: speed:%d. line_link_bitmap:0x%x\n",
1299 status
->line_link_bitmap
);