git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme.c
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / spdk / dpdk / drivers / raw / ifpga_rawdev / base / ifpga_fme.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
3 */
4
5 #include "ifpga_feature_dev.h"
6
7 #define PWR_THRESHOLD_MAX 0x7F
8
9 int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
10 {
11 struct feature *feature;
12
13 if (!fme)
14 return -ENOENT;
15
16 feature = get_fme_feature_by_id(fme, prop->feature_id);
17
18 if (feature && feature->ops && feature->ops->get_prop)
19 return feature->ops->get_prop(feature, prop);
20
21 return -ENOENT;
22 }
23
24 int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
25 {
26 struct feature *feature;
27
28 if (!fme)
29 return -ENOENT;
30
31 feature = get_fme_feature_by_id(fme, prop->feature_id);
32
33 if (feature && feature->ops && feature->ops->set_prop)
34 return feature->ops->set_prop(feature, prop);
35
36 return -ENOENT;
37 }
38
39 int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
40 {
41 struct feature *feature;
42
43 if (!fme)
44 return -ENOENT;
45
46 feature = get_fme_feature_by_id(fme, feature_id);
47
48 if (feature && feature->ops && feature->ops->set_irq)
49 return feature->ops->set_irq(feature, irq_set);
50
51 return -ENOENT;
52 }
53
54 /* fme private feature head */
55 static int fme_hdr_init(struct feature *feature)
56 {
57 struct feature_fme_header *fme_hdr;
58
59 fme_hdr = (struct feature_fme_header *)feature->addr;
60
61 dev_info(NULL, "FME HDR Init.\n");
62 dev_info(NULL, "FME cap %llx.\n",
63 (unsigned long long)fme_hdr->capability.csr);
64
65 return 0;
66 }
67
68 static void fme_hdr_uinit(struct feature *feature)
69 {
70 UNUSED(feature);
71
72 dev_info(NULL, "FME HDR UInit.\n");
73 }
74
75 static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
76 {
77 struct feature_fme_header *fme_hdr
78 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
79 struct feature_header header;
80
81 header.csr = readq(&fme_hdr->header);
82 *revision = header.revision;
83
84 return 0;
85 }
86
87 static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
88 {
89 struct feature_fme_header *fme_hdr
90 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
91 struct feature_fme_capability fme_capability;
92
93 fme_capability.csr = readq(&fme_hdr->capability);
94 *ports_num = fme_capability.num_ports;
95
96 return 0;
97 }
98
99 static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
100 {
101 struct feature_fme_header *fme_hdr
102 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
103 struct feature_fme_capability fme_capability;
104
105 fme_capability.csr = readq(&fme_hdr->capability);
106 *cache_size = fme_capability.cache_size;
107
108 return 0;
109 }
110
111 static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
112 {
113 struct feature_fme_header *fme_hdr
114 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
115 struct feature_fme_capability fme_capability;
116
117 fme_capability.csr = readq(&fme_hdr->capability);
118 *version = fme_capability.fabric_verid;
119
120 return 0;
121 }
122
123 static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
124 {
125 struct feature_fme_header *fme_hdr
126 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
127 struct feature_fme_capability fme_capability;
128
129 fme_capability.csr = readq(&fme_hdr->capability);
130 *socket_id = fme_capability.socket_id;
131
132 return 0;
133 }
134
135 static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
136 u64 *bitstream_id)
137 {
138 struct feature_fme_header *fme_hdr
139 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
140
141 *bitstream_id = readq(&fme_hdr->bitstream_id);
142
143 return 0;
144 }
145
146 static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
147 u64 *bitstream_metadata)
148 {
149 struct feature_fme_header *fme_hdr
150 = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
151
152 *bitstream_metadata = readq(&fme_hdr->bitstream_md);
153
154 return 0;
155 }
156
157 static int
158 fme_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
159 {
160 struct ifpga_fme_hw *fme = feature->parent;
161
162 switch (prop->prop_id) {
163 case FME_HDR_PROP_REVISION:
164 return fme_hdr_get_revision(fme, &prop->data);
165 case FME_HDR_PROP_PORTS_NUM:
166 return fme_hdr_get_ports_num(fme, &prop->data);
167 case FME_HDR_PROP_CACHE_SIZE:
168 return fme_hdr_get_cache_size(fme, &prop->data);
169 case FME_HDR_PROP_VERSION:
170 return fme_hdr_get_version(fme, &prop->data);
171 case FME_HDR_PROP_SOCKET_ID:
172 return fme_hdr_get_socket_id(fme, &prop->data);
173 case FME_HDR_PROP_BITSTREAM_ID:
174 return fme_hdr_get_bitstream_id(fme, &prop->data);
175 case FME_HDR_PROP_BITSTREAM_METADATA:
176 return fme_hdr_get_bitstream_metadata(fme, &prop->data);
177 }
178
179 return -ENOENT;
180 }
181
/* Operation table for the FME header private feature (no set_prop/set_irq). */
struct feature_ops fme_hdr_ops = {
	.init = fme_hdr_init,
	.uinit = fme_hdr_uinit,
	.get_prop = fme_hdr_get_prop,
};
187
188 /* thermal management */
189 static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
190 {
191 struct feature_fme_thermal *thermal;
192 struct feature_fme_tmp_threshold temp_threshold;
193
194 thermal = get_fme_feature_ioaddr_by_index(fme,
195 FME_FEATURE_ID_THERMAL_MGMT);
196
197 temp_threshold.csr = readq(&thermal->threshold);
198 *thres1 = temp_threshold.tmp_thshold1;
199
200 return 0;
201 }
202
203 static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
204 {
205 struct feature_fme_thermal *thermal;
206 struct feature_fme_header *fme_hdr;
207 struct feature_fme_tmp_threshold tmp_threshold;
208 struct feature_fme_capability fme_capability;
209
210 thermal = get_fme_feature_ioaddr_by_index(fme,
211 FME_FEATURE_ID_THERMAL_MGMT);
212 fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
213
214 spinlock_lock(&fme->lock);
215 tmp_threshold.csr = readq(&thermal->threshold);
216 fme_capability.csr = readq(&fme_hdr->capability);
217
218 if (fme_capability.lock_bit == 1) {
219 spinlock_unlock(&fme->lock);
220 return -EBUSY;
221 } else if (thres1 > 100) {
222 spinlock_unlock(&fme->lock);
223 return -EINVAL;
224 } else if (thres1 == 0) {
225 tmp_threshold.tmp_thshold1_enable = 0;
226 tmp_threshold.tmp_thshold1 = thres1;
227 } else {
228 tmp_threshold.tmp_thshold1_enable = 1;
229 tmp_threshold.tmp_thshold1 = thres1;
230 }
231
232 writeq(tmp_threshold.csr, &thermal->threshold);
233 spinlock_unlock(&fme->lock);
234
235 return 0;
236 }
237
238 static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
239 {
240 struct feature_fme_thermal *thermal;
241 struct feature_fme_tmp_threshold temp_threshold;
242
243 thermal = get_fme_feature_ioaddr_by_index(fme,
244 FME_FEATURE_ID_THERMAL_MGMT);
245
246 temp_threshold.csr = readq(&thermal->threshold);
247 *thres2 = temp_threshold.tmp_thshold2;
248
249 return 0;
250 }
251
252 static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
253 {
254 struct feature_fme_thermal *thermal;
255 struct feature_fme_header *fme_hdr;
256 struct feature_fme_tmp_threshold tmp_threshold;
257 struct feature_fme_capability fme_capability;
258
259 thermal = get_fme_feature_ioaddr_by_index(fme,
260 FME_FEATURE_ID_THERMAL_MGMT);
261 fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
262
263 spinlock_lock(&fme->lock);
264 tmp_threshold.csr = readq(&thermal->threshold);
265 fme_capability.csr = readq(&fme_hdr->capability);
266
267 if (fme_capability.lock_bit == 1) {
268 spinlock_unlock(&fme->lock);
269 return -EBUSY;
270 } else if (thres2 > 100) {
271 spinlock_unlock(&fme->lock);
272 return -EINVAL;
273 } else if (thres2 == 0) {
274 tmp_threshold.tmp_thshold2_enable = 0;
275 tmp_threshold.tmp_thshold2 = thres2;
276 } else {
277 tmp_threshold.tmp_thshold2_enable = 1;
278 tmp_threshold.tmp_thshold2 = thres2;
279 }
280
281 writeq(tmp_threshold.csr, &thermal->threshold);
282 spinlock_unlock(&fme->lock);
283
284 return 0;
285 }
286
287 static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
288 u64 *thres_trip)
289 {
290 struct feature_fme_thermal *thermal;
291 struct feature_fme_tmp_threshold temp_threshold;
292
293 thermal = get_fme_feature_ioaddr_by_index(fme,
294 FME_FEATURE_ID_THERMAL_MGMT);
295
296 temp_threshold.csr = readq(&thermal->threshold);
297 *thres_trip = temp_threshold.therm_trip_thshold;
298
299 return 0;
300 }
301
302 static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
303 u64 *thres1_reached)
304 {
305 struct feature_fme_thermal *thermal;
306 struct feature_fme_tmp_threshold temp_threshold;
307
308 thermal = get_fme_feature_ioaddr_by_index(fme,
309 FME_FEATURE_ID_THERMAL_MGMT);
310
311 temp_threshold.csr = readq(&thermal->threshold);
312 *thres1_reached = temp_threshold.thshold1_status;
313
314 return 0;
315 }
316
317 static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
318 u64 *thres1_reached)
319 {
320 struct feature_fme_thermal *thermal;
321 struct feature_fme_tmp_threshold temp_threshold;
322
323 thermal = get_fme_feature_ioaddr_by_index(fme,
324 FME_FEATURE_ID_THERMAL_MGMT);
325
326 temp_threshold.csr = readq(&thermal->threshold);
327 *thres1_reached = temp_threshold.thshold2_status;
328
329 return 0;
330 }
331
332 static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
333 u64 *thres1_policy)
334 {
335 struct feature_fme_thermal *thermal;
336 struct feature_fme_tmp_threshold temp_threshold;
337
338 thermal = get_fme_feature_ioaddr_by_index(fme,
339 FME_FEATURE_ID_THERMAL_MGMT);
340
341 temp_threshold.csr = readq(&thermal->threshold);
342 *thres1_policy = temp_threshold.thshold_policy;
343
344 return 0;
345 }
346
347 static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
348 u64 thres1_policy)
349 {
350 struct feature_fme_thermal *thermal;
351 struct feature_fme_tmp_threshold tmp_threshold;
352
353 thermal = get_fme_feature_ioaddr_by_index(fme,
354 FME_FEATURE_ID_THERMAL_MGMT);
355
356 spinlock_lock(&fme->lock);
357 tmp_threshold.csr = readq(&thermal->threshold);
358
359 if (thres1_policy == 0) {
360 tmp_threshold.thshold_policy = 0;
361 } else if (thres1_policy == 1) {
362 tmp_threshold.thshold_policy = 1;
363 } else {
364 spinlock_unlock(&fme->lock);
365 return -EINVAL;
366 }
367
368 writeq(tmp_threshold.csr, &thermal->threshold);
369 spinlock_unlock(&fme->lock);
370
371 return 0;
372 }
373
374 static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
375 {
376 struct feature_fme_thermal *thermal;
377 struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
378
379 thermal = get_fme_feature_ioaddr_by_index(fme,
380 FME_FEATURE_ID_THERMAL_MGMT);
381
382 temp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);
383 *temp = temp_rdsensor_fmt1.fpga_temp;
384
385 return 0;
386 }
387
388 static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
389 {
390 struct feature_fme_thermal *fme_thermal
391 = get_fme_feature_ioaddr_by_index(fme,
392 FME_FEATURE_ID_THERMAL_MGMT);
393 struct feature_header header;
394
395 header.csr = readq(&fme_thermal->header);
396 *revision = header.revision;
397
398 return 0;
399 }
400
401 #define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
402
403 static int fme_thermal_mgmt_init(struct feature *feature)
404 {
405 struct feature_fme_thermal *fme_thermal;
406 struct feature_fme_tmp_threshold_cap thermal_cap;
407
408 UNUSED(feature);
409
410 dev_info(NULL, "FME thermal mgmt Init.\n");
411
412 fme_thermal = (struct feature_fme_thermal *)feature->addr;
413 thermal_cap.csr = readq(&fme_thermal->threshold_cap);
414
415 dev_info(NULL, "FME thermal cap %llx.\n",
416 (unsigned long long)fme_thermal->threshold_cap.csr);
417
418 if (thermal_cap.tmp_thshold_disabled)
419 feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
420
421 return 0;
422 }
423
424 static void fme_thermal_mgmt_uinit(struct feature *feature)
425 {
426 UNUSED(feature);
427
428 dev_info(NULL, "FME thermal mgmt UInit.\n");
429 }
430
431 static int
432 fme_thermal_set_prop(struct feature *feature, struct feature_prop *prop)
433 {
434 struct ifpga_fme_hw *fme = feature->parent;
435
436 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
437 return -ENOENT;
438
439 switch (prop->prop_id) {
440 case FME_THERMAL_PROP_THRESHOLD1:
441 return fme_thermal_set_threshold1(fme, prop->data);
442 case FME_THERMAL_PROP_THRESHOLD2:
443 return fme_thermal_set_threshold2(fme, prop->data);
444 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
445 return fme_thermal_set_threshold1_policy(fme, prop->data);
446 }
447
448 return -ENOENT;
449 }
450
451 static int
452 fme_thermal_get_prop(struct feature *feature, struct feature_prop *prop)
453 {
454 struct ifpga_fme_hw *fme = feature->parent;
455
456 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
457 prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
458 prop->prop_id != FME_THERMAL_PROP_REVISION)
459 return -ENOENT;
460
461 switch (prop->prop_id) {
462 case FME_THERMAL_PROP_THRESHOLD1:
463 return fme_thermal_get_threshold1(fme, &prop->data);
464 case FME_THERMAL_PROP_THRESHOLD2:
465 return fme_thermal_get_threshold2(fme, &prop->data);
466 case FME_THERMAL_PROP_THRESHOLD_TRIP:
467 return fme_thermal_get_threshold_trip(fme, &prop->data);
468 case FME_THERMAL_PROP_THRESHOLD1_REACHED:
469 return fme_thermal_get_threshold1_reached(fme, &prop->data);
470 case FME_THERMAL_PROP_THRESHOLD2_REACHED:
471 return fme_thermal_get_threshold2_reached(fme, &prop->data);
472 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
473 return fme_thermal_get_threshold1_policy(fme, &prop->data);
474 case FME_THERMAL_PROP_TEMPERATURE:
475 return fme_thermal_get_temperature(fme, &prop->data);
476 case FME_THERMAL_PROP_REVISION:
477 return fme_thermal_get_revision(fme, &prop->data);
478 }
479
480 return -ENOENT;
481 }
482
/* Operation table for the FME thermal management private feature. */
struct feature_ops fme_thermal_mgmt_ops = {
	.init = fme_thermal_mgmt_init,
	.uinit = fme_thermal_mgmt_uinit,
	.get_prop = fme_thermal_get_prop,
	.set_prop = fme_thermal_set_prop,
};
489
490 static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
491 {
492 struct feature_fme_power *fme_power
493 = get_fme_feature_ioaddr_by_index(fme,
494 FME_FEATURE_ID_POWER_MGMT);
495 struct feature_fme_pm_status pm_status;
496
497 pm_status.csr = readq(&fme_power->status);
498
499 *consumed = pm_status.pwr_consumed;
500
501 return 0;
502 }
503
504 static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
505 {
506 struct feature_fme_power *fme_power
507 = get_fme_feature_ioaddr_by_index(fme,
508 FME_FEATURE_ID_POWER_MGMT);
509 struct feature_fme_pm_ap_threshold pm_ap_threshold;
510
511 pm_ap_threshold.csr = readq(&fme_power->threshold);
512
513 *threshold = pm_ap_threshold.threshold1;
514
515 return 0;
516 }
517
518 static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
519 {
520 struct feature_fme_power *fme_power
521 = get_fme_feature_ioaddr_by_index(fme,
522 FME_FEATURE_ID_POWER_MGMT);
523 struct feature_fme_pm_ap_threshold pm_ap_threshold;
524
525 spinlock_lock(&fme->lock);
526 pm_ap_threshold.csr = readq(&fme_power->threshold);
527
528 if (threshold <= PWR_THRESHOLD_MAX) {
529 pm_ap_threshold.threshold1 = threshold;
530 } else {
531 spinlock_unlock(&fme->lock);
532 return -EINVAL;
533 }
534
535 writeq(pm_ap_threshold.csr, &fme_power->threshold);
536 spinlock_unlock(&fme->lock);
537
538 return 0;
539 }
540
541 static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
542 {
543 struct feature_fme_power *fme_power
544 = get_fme_feature_ioaddr_by_index(fme,
545 FME_FEATURE_ID_POWER_MGMT);
546 struct feature_fme_pm_ap_threshold pm_ap_threshold;
547
548 pm_ap_threshold.csr = readq(&fme_power->threshold);
549
550 *threshold = pm_ap_threshold.threshold2;
551
552 return 0;
553 }
554
555 static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
556 {
557 struct feature_fme_power *fme_power
558 = get_fme_feature_ioaddr_by_index(fme,
559 FME_FEATURE_ID_POWER_MGMT);
560 struct feature_fme_pm_ap_threshold pm_ap_threshold;
561
562 spinlock_lock(&fme->lock);
563 pm_ap_threshold.csr = readq(&fme_power->threshold);
564
565 if (threshold <= PWR_THRESHOLD_MAX) {
566 pm_ap_threshold.threshold2 = threshold;
567 } else {
568 spinlock_unlock(&fme->lock);
569 return -EINVAL;
570 }
571
572 writeq(pm_ap_threshold.csr, &fme_power->threshold);
573 spinlock_unlock(&fme->lock);
574
575 return 0;
576 }
577
578 static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
579 u64 *threshold_status)
580 {
581 struct feature_fme_power *fme_power
582 = get_fme_feature_ioaddr_by_index(fme,
583 FME_FEATURE_ID_POWER_MGMT);
584 struct feature_fme_pm_ap_threshold pm_ap_threshold;
585
586 pm_ap_threshold.csr = readq(&fme_power->threshold);
587
588 *threshold_status = pm_ap_threshold.threshold1_status;
589
590 return 0;
591 }
592
593 static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
594 u64 *threshold_status)
595 {
596 struct feature_fme_power *fme_power
597 = get_fme_feature_ioaddr_by_index(fme,
598 FME_FEATURE_ID_POWER_MGMT);
599 struct feature_fme_pm_ap_threshold pm_ap_threshold;
600
601 pm_ap_threshold.csr = readq(&fme_power->threshold);
602
603 *threshold_status = pm_ap_threshold.threshold2_status;
604
605 return 0;
606 }
607
608 static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
609 {
610 struct feature_fme_power *fme_power
611 = get_fme_feature_ioaddr_by_index(fme,
612 FME_FEATURE_ID_POWER_MGMT);
613 struct feature_fme_pm_status pm_status;
614
615 pm_status.csr = readq(&fme_power->status);
616
617 *rtl = pm_status.fpga_latency_report;
618
619 return 0;
620 }
621
622 static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
623 {
624 struct feature_fme_power *fme_power
625 = get_fme_feature_ioaddr_by_index(fme,
626 FME_FEATURE_ID_POWER_MGMT);
627 struct feature_fme_pm_xeon_limit xeon_limit;
628
629 xeon_limit.csr = readq(&fme_power->xeon_limit);
630
631 if (!xeon_limit.enable)
632 xeon_limit.pwr_limit = 0;
633
634 *limit = xeon_limit.pwr_limit;
635
636 return 0;
637 }
638
639 static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
640 {
641 struct feature_fme_power *fme_power
642 = get_fme_feature_ioaddr_by_index(fme,
643 FME_FEATURE_ID_POWER_MGMT);
644 struct feature_fme_pm_fpga_limit fpga_limit;
645
646 fpga_limit.csr = readq(&fme_power->fpga_limit);
647
648 if (!fpga_limit.enable)
649 fpga_limit.pwr_limit = 0;
650
651 *limit = fpga_limit.pwr_limit;
652
653 return 0;
654 }
655
656 static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
657 {
658 struct feature_fme_power *fme_power
659 = get_fme_feature_ioaddr_by_index(fme,
660 FME_FEATURE_ID_POWER_MGMT);
661 struct feature_header header;
662
663 header.csr = readq(&fme_power->header);
664 *revision = header.revision;
665
666 return 0;
667 }
668
669 static int fme_power_mgmt_init(struct feature *feature)
670 {
671 UNUSED(feature);
672
673 dev_info(NULL, "FME power mgmt Init.\n");
674
675 return 0;
676 }
677
678 static void fme_power_mgmt_uinit(struct feature *feature)
679 {
680 UNUSED(feature);
681
682 dev_info(NULL, "FME power mgmt UInit.\n");
683 }
684
685 static int fme_power_mgmt_get_prop(struct feature *feature,
686 struct feature_prop *prop)
687 {
688 struct ifpga_fme_hw *fme = feature->parent;
689
690 switch (prop->prop_id) {
691 case FME_PWR_PROP_CONSUMED:
692 return fme_pwr_get_consumed(fme, &prop->data);
693 case FME_PWR_PROP_THRESHOLD1:
694 return fme_pwr_get_threshold1(fme, &prop->data);
695 case FME_PWR_PROP_THRESHOLD2:
696 return fme_pwr_get_threshold2(fme, &prop->data);
697 case FME_PWR_PROP_THRESHOLD1_STATUS:
698 return fme_pwr_get_threshold1_status(fme, &prop->data);
699 case FME_PWR_PROP_THRESHOLD2_STATUS:
700 return fme_pwr_get_threshold2_status(fme, &prop->data);
701 case FME_PWR_PROP_RTL:
702 return fme_pwr_get_rtl(fme, &prop->data);
703 case FME_PWR_PROP_XEON_LIMIT:
704 return fme_pwr_get_xeon_limit(fme, &prop->data);
705 case FME_PWR_PROP_FPGA_LIMIT:
706 return fme_pwr_get_fpga_limit(fme, &prop->data);
707 case FME_PWR_PROP_REVISION:
708 return fme_pwr_get_revision(fme, &prop->data);
709 }
710
711 return -ENOENT;
712 }
713
714 static int fme_power_mgmt_set_prop(struct feature *feature,
715 struct feature_prop *prop)
716 {
717 struct ifpga_fme_hw *fme = feature->parent;
718
719 switch (prop->prop_id) {
720 case FME_PWR_PROP_THRESHOLD1:
721 return fme_pwr_set_threshold1(fme, prop->data);
722 case FME_PWR_PROP_THRESHOLD2:
723 return fme_pwr_set_threshold2(fme, prop->data);
724 }
725
726 return -ENOENT;
727 }
728
/* Operation table for the FME power management private feature. */
struct feature_ops fme_power_mgmt_ops = {
	.init = fme_power_mgmt_init,
	.uinit = fme_power_mgmt_uinit,
	.get_prop = fme_power_mgmt_get_prop,
	.set_prop = fme_power_mgmt_set_prop,
};