/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include "ifpga_feature_dev.h"

#define PERF_OBJ_ROOT_ID	0xff
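/*
 * FME global performance counter (iperf) private feature.
 *
 * The helpers below read and program the cache, VT-d/IOMMU and fabric event
 * counters exposed through the FME_FEATURE_ID_GLOBAL_IPERF register block;
 * they are wired into the feature framework by the get_prop()/set_prop()
 * handlers and the feature_ops table at the end of this file.
 */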
static int fme_iperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
{
	struct feature_fme_iperf *iperf;
	struct feature_fme_ifpmon_clk_ctr clk;

	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);
	clk.afu_interf_clock = readq(&iperf->clk);

	*clock = clk.afu_interf_clock;
	return 0;
}
static int fme_iperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
	struct feature_fme_iperf *iperf;
	struct feature_header header;

	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);
	header.csr = readq(&iperf->header);
	*revision = header.revision;

	return 0;
}
static int fme_iperf_get_cache_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
{
	struct feature_fme_iperf *iperf;
	struct feature_fme_ifpmon_ch_ctl ctl;

	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->ch_ctl);
	*freeze = (u64)ctl.freeze;

	return 0;
}
static int fme_iperf_set_cache_freeze(struct ifpga_fme_hw *fme, u64 freeze)
{
	struct feature_fme_iperf *iperf;
	struct feature_fme_ifpmon_ch_ctl ctl;
	bool state;

	state = !!freeze;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->ch_ctl);
	ctl.freeze = state;
	writeq(ctl.csr, &iperf->ch_ctl);
	spinlock_unlock(&fme->lock);

	return 0;
}
#define IPERF_TIMEOUT	30
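/* Maximum time fpga_wait_register_field() polls for the programmed event
 * code to appear in a counter register before the read is abandoned.
 */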
static u64 read_cache_counter(struct ifpga_fme_hw *fme,
			      u8 channel, enum iperf_cache_events event)
{
	struct feature_fme_iperf *iperf;
	struct feature_fme_ifpmon_ch_ctl ctl;
	struct feature_fme_ifpmon_ch_ctr ctr0, ctr1;
	u64 counter;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);

	/* set channel access type and cache event code. */
	ctl.csr = readq(&iperf->ch_ctl);
	ctl.cci_chsel = channel;
	ctl.cache_event = event;
	writeq(ctl.csr, &iperf->ch_ctl);

	/* check the event type in the counter registers */
	ctr0.event_code = event;

	if (fpga_wait_register_field(event_code, ctr0,
				&iperf->ch_ctr0, IPERF_TIMEOUT, 1)) {
		dev_err(fme, "timeout, unmatched cache event type in counter registers.\n");
		spinlock_unlock(&fme->lock);
		return -1;
	}

	ctr0.csr = readq(&iperf->ch_ctr0);
	ctr1.csr = readq(&iperf->ch_ctr1);
	counter = ctr0.cache_counter + ctr1.cache_counter;
	spinlock_unlock(&fme->lock);

	return counter;
}
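/* CACHE_SHOW() expands to one fme_iperf_get_cache_<name>() accessor per
 * cache event, each reading its counter on the given channel.
 */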
#define CACHE_SHOW(name, type, event)					\
static int fme_iperf_get_cache_##name(struct ifpga_fme_hw *fme,	\
		u64 *counter)						\
{									\
	*counter = read_cache_counter(fme, type, event);		\
	return 0;							\
}
CACHE_SHOW(read_hit, CACHE_CHANNEL_RD, IPERF_CACHE_RD_HIT);
CACHE_SHOW(read_miss, CACHE_CHANNEL_RD, IPERF_CACHE_RD_MISS);
CACHE_SHOW(write_hit, CACHE_CHANNEL_WR, IPERF_CACHE_WR_HIT);
CACHE_SHOW(write_miss, CACHE_CHANNEL_WR, IPERF_CACHE_WR_MISS);
CACHE_SHOW(hold_request, CACHE_CHANNEL_RD, IPERF_CACHE_HOLD_REQ);
CACHE_SHOW(tx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_TX_REQ_STALL);
CACHE_SHOW(rx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_RX_REQ_STALL);
CACHE_SHOW(rx_eviction, CACHE_CHANNEL_RD, IPERF_CACHE_EVICTIONS);
CACHE_SHOW(data_write_port_contention, CACHE_CHANNEL_WR,
	   IPERF_CACHE_DATA_WR_PORT_CONTEN);
CACHE_SHOW(tag_write_port_contention, CACHE_CHANNEL_WR,
	   IPERF_CACHE_TAG_WR_PORT_CONTEN);
static int fme_iperf_get_vtd_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
{
	struct feature_fme_ifpmon_vtd_ctl ctl;
	struct feature_fme_iperf *iperf;

	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->vtd_ctl);
	*freeze = (u64)ctl.freeze;

	return 0;
}
static int fme_iperf_set_vtd_freeze(struct ifpga_fme_hw *fme, u64 freeze)
{
	struct feature_fme_ifpmon_vtd_ctl ctl;
	struct feature_fme_iperf *iperf;
	bool state;

	state = !!freeze;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->vtd_ctl);
	ctl.freeze = state;
	writeq(ctl.csr, &iperf->vtd_ctl);
	spinlock_unlock(&fme->lock);

	return 0;
}
static u64 read_iommu_sip_counter(struct ifpga_fme_hw *fme,
				  enum iperf_vtd_sip_events event)
{
	struct feature_fme_ifpmon_vtd_sip_ctl sip_ctl;
	struct feature_fme_ifpmon_vtd_sip_ctr sip_ctr;
	struct feature_fme_iperf *iperf;
	u64 counter;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);
	sip_ctl.csr = readq(&iperf->vtd_sip_ctl);
	sip_ctl.vtd_evtcode = event;
	writeq(sip_ctl.csr, &iperf->vtd_sip_ctl);

	sip_ctr.event_code = event;

	if (fpga_wait_register_field(event_code, sip_ctr,
				&iperf->vtd_sip_ctr, IPERF_TIMEOUT, 1)) {
		dev_err(fme, "timeout, unmatched VTd SIP event type in counter registers\n");
		spinlock_unlock(&fme->lock);
		return -1;
	}

	sip_ctr.csr = readq(&iperf->vtd_sip_ctr);
	counter = sip_ctr.vtd_counter;
	spinlock_unlock(&fme->lock);

	return counter;
}
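/* VTD_SIP_SHOW() expands to one fme_iperf_get_vtd_sip_<name>() accessor per
 * IOMMU SIP event.
 */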
#define VTD_SIP_SHOW(name, event)					\
static int fme_iperf_get_vtd_sip_##name(struct ifpga_fme_hw *fme,	\
		u64 *counter)						\
{									\
	*counter = read_iommu_sip_counter(fme, event);			\
	return 0;							\
}
VTD_SIP_SHOW(iotlb_4k_hit, IPERF_VTD_SIP_IOTLB_4K_HIT);
VTD_SIP_SHOW(iotlb_2m_hit, IPERF_VTD_SIP_IOTLB_2M_HIT);
VTD_SIP_SHOW(iotlb_1g_hit, IPERF_VTD_SIP_IOTLB_1G_HIT);
VTD_SIP_SHOW(slpwc_l3_hit, IPERF_VTD_SIP_SLPWC_L3_HIT);
VTD_SIP_SHOW(slpwc_l4_hit, IPERF_VTD_SIP_SLPWC_L4_HIT);
VTD_SIP_SHOW(rcc_hit, IPERF_VTD_SIP_RCC_HIT);
VTD_SIP_SHOW(iotlb_4k_miss, IPERF_VTD_SIP_IOTLB_4K_MISS);
VTD_SIP_SHOW(iotlb_2m_miss, IPERF_VTD_SIP_IOTLB_2M_MISS);
VTD_SIP_SHOW(iotlb_1g_miss, IPERF_VTD_SIP_IOTLB_1G_MISS);
VTD_SIP_SHOW(slpwc_l3_miss, IPERF_VTD_SIP_SLPWC_L3_MISS);
VTD_SIP_SHOW(slpwc_l4_miss, IPERF_VTD_SIP_SLPWC_L4_MISS);
VTD_SIP_SHOW(rcc_miss, IPERF_VTD_SIP_RCC_MISS);
static u64 read_iommu_counter(struct ifpga_fme_hw *fme, u8 port_id,
			      enum iperf_vtd_events base_event)
{
	struct feature_fme_ifpmon_vtd_ctl ctl;
	struct feature_fme_ifpmon_vtd_ctr ctr;
	struct feature_fme_iperf *iperf;
	enum iperf_vtd_events event = base_event + port_id;
	u64 counter;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->vtd_ctl);
	ctl.vtd_evtcode = event;
	writeq(ctl.csr, &iperf->vtd_ctl);

	ctr.event_code = event;

	if (fpga_wait_register_field(event_code, ctr,
				&iperf->vtd_ctr, IPERF_TIMEOUT, 1)) {
		dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
		spinlock_unlock(&fme->lock);
		return -1;
	}

	ctr.csr = readq(&iperf->vtd_ctr);
	counter = ctr.vtd_counter;
	spinlock_unlock(&fme->lock);

	return counter;
}
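/* VTD_PORT_SHOW() expands to one per-port fme_iperf_get_vtd_port_<name>()
 * accessor; the port id is added to the base event code to select the event.
 */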
#define VTD_PORT_SHOW(name, base_event)					\
static int fme_iperf_get_vtd_port_##name(struct ifpga_fme_hw *fme,	\
		u8 port_id, u64 *counter)				\
{									\
	*counter = read_iommu_counter(fme, port_id, base_event);	\
	return 0;							\
}
VTD_PORT_SHOW(read_transaction, IPERF_VTD_AFU_MEM_RD_TRANS);
VTD_PORT_SHOW(write_transaction, IPERF_VTD_AFU_MEM_WR_TRANS);
VTD_PORT_SHOW(devtlb_read_hit, IPERF_VTD_AFU_DEVTLB_RD_HIT);
VTD_PORT_SHOW(devtlb_write_hit, IPERF_VTD_AFU_DEVTLB_WR_HIT);
VTD_PORT_SHOW(devtlb_4k_fill, IPERF_VTD_DEVTLB_4K_FILL);
VTD_PORT_SHOW(devtlb_2m_fill, IPERF_VTD_DEVTLB_2M_FILL);
VTD_PORT_SHOW(devtlb_1g_fill, IPERF_VTD_DEVTLB_1G_FILL);
static bool fabric_pobj_is_enabled(u8 port_id, struct feature_fme_iperf *iperf)
{
	struct feature_fme_ifpmon_fab_ctl ctl;

	ctl.csr = readq(&iperf->fab_ctl);

	if (ctl.port_filter == FAB_DISABLE_FILTER)
		return port_id == PERF_OBJ_ROOT_ID;

	return port_id == ctl.port_id;
}
static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
			       enum iperf_fab_events fab_event)
{
	struct feature_fme_ifpmon_fab_ctl ctl;
	struct feature_fme_ifpmon_fab_ctr ctr;
	struct feature_fme_iperf *iperf;
	u64 counter = 0;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);

	/* if this port is disabled, force the counter to return zero. */
	if (!fabric_pobj_is_enabled(port_id, iperf))
		goto exit;

	ctl.csr = readq(&iperf->fab_ctl);
	ctl.fab_evtcode = fab_event;
	writeq(ctl.csr, &iperf->fab_ctl);

	ctr.event_code = fab_event;

	if (fpga_wait_register_field(event_code, ctr,
				&iperf->fab_ctr, IPERF_TIMEOUT, 1)) {
		dev_err(fme, "timeout, unmatched fabric event type in counter registers.\n");
		spinlock_unlock(&fme->lock);
		return -1;
	}

	ctr.csr = readq(&iperf->fab_ctr);
	counter = ctr.fab_cnt;
exit:
	spinlock_unlock(&fme->lock);
	return counter;
}
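/* FAB_PORT_SHOW() expands to one per-port fme_iperf_get_fab_port_<name>()
 * accessor for each fabric event.
 */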
#define FAB_PORT_SHOW(name, event)					\
static int fme_iperf_get_fab_port_##name(struct ifpga_fme_hw *fme,	\
		u8 port_id, u64 *counter)				\
{									\
	*counter = read_fabric_counter(fme, port_id, event);		\
	return 0;							\
}
FAB_PORT_SHOW(pcie0_read, IPERF_FAB_PCIE0_RD);
FAB_PORT_SHOW(pcie0_write, IPERF_FAB_PCIE0_WR);
FAB_PORT_SHOW(pcie1_read, IPERF_FAB_PCIE1_RD);
FAB_PORT_SHOW(pcie1_write, IPERF_FAB_PCIE1_WR);
FAB_PORT_SHOW(upi_read, IPERF_FAB_UPI_RD);
FAB_PORT_SHOW(upi_write, IPERF_FAB_UPI_WR);
FAB_PORT_SHOW(mmio_read, IPERF_FAB_MMIO_RD);
FAB_PORT_SHOW(mmio_write, IPERF_FAB_MMIO_WR);
static int fme_iperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
					 u8 port_id, u64 *enable)
{
	struct feature_fme_iperf *iperf;
	bool status;

	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);

	status = fabric_pobj_is_enabled(port_id, iperf);
	*enable = (u64)status;

	return 0;
}
/*
 * Enabling the event counter for one port (or for all ports) in the fabric
 * automatically disables any other fabric event counter that was previously
 * enabled.
 */
static int fme_iperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
					 u8 port_id, u64 enable)
{
	struct feature_fme_ifpmon_fab_ctl ctl;
	struct feature_fme_iperf *iperf;
	bool state;

	state = !!enable;
	if (!state)
		return -EINVAL;

	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);

	/* nothing to do if it is already enabled. */
	if (fabric_pobj_is_enabled(port_id, iperf))
		return 0;

	spinlock_lock(&fme->lock);
	ctl.csr = readq(&iperf->fab_ctl);
	if (port_id == PERF_OBJ_ROOT_ID) {
		ctl.port_filter = FAB_DISABLE_FILTER;
	} else {
		ctl.port_filter = FAB_ENABLE_FILTER;
		ctl.port_id = port_id;
	}

	writeq(ctl.csr, &iperf->fab_ctl);
	spinlock_unlock(&fme->lock);

	return 0;
}
static int fme_iperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
{
	struct feature_fme_iperf *iperf;
	struct feature_fme_ifpmon_fab_ctl ctl;

	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->fab_ctl);
	*freeze = (u64)ctl.freeze;

	return 0;
}
static int fme_iperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
{
	struct feature_fme_iperf *iperf;
	struct feature_fme_ifpmon_fab_ctl ctl;
	bool state;

	state = !!freeze;

	spinlock_lock(&fme->lock);
	iperf = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_GLOBAL_IPERF);
	ctl.csr = readq(&iperf->fab_ctl);
	ctl.freeze = state;
	writeq(ctl.csr, &iperf->fab_ctl);
	spinlock_unlock(&fme->lock);

	return 0;
}
#define PERF_MAX_PORT_NUM	1
#define FME_IPERF_CAP_IOMMU	0x1
static int fme_global_iperf_init(struct feature *feature)
{
	struct ifpga_fme_hw *fme;
	struct feature_fme_header *fme_hdr;
	struct feature_fme_capability fme_capability;

	dev_info(NULL, "FME global_iperf Init.\n");

	fme = (struct ifpga_fme_hw *)feature->parent;
	fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	/* check whether iommu is supported on this device. */
	fme_capability.csr = readq(&fme_hdr->capability);
	dev_info(NULL, "FME HEAD fme_capability %llx.\n",
		 (unsigned long long)fme_hdr->capability.csr);

	if (fme_capability.iommu_support)
		feature->cap |= FME_IPERF_CAP_IOMMU;

	return 0;
}
static void fme_global_iperf_uinit(struct feature *feature)
{
	UNUSED(feature);

	dev_info(NULL, "FME global_iperf UInit.\n");
}
static int fme_iperf_root_get_prop(struct feature *feature,
				   struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	if (sub != PERF_PROP_SUB_UNUSED)
		return -ENOENT;

	switch (id) {
	case 0x1: /* CLOCK */
		return fme_iperf_get_clock(fme, &prop->data);
	case 0x2: /* REVISION */
		return fme_iperf_get_revision(fme, &prop->data);
	}

	return -ENOENT;
}
static int fme_iperf_cache_get_prop(struct feature *feature,
				    struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	if (sub != PERF_PROP_SUB_UNUSED)
		return -ENOENT;

	switch (id) {
	case 0x1: /* FREEZE */
		return fme_iperf_get_cache_freeze(fme, &prop->data);
	case 0x2: /* READ_HIT */
		return fme_iperf_get_cache_read_hit(fme, &prop->data);
	case 0x3: /* READ_MISS */
		return fme_iperf_get_cache_read_miss(fme, &prop->data);
	case 0x4: /* WRITE_HIT */
		return fme_iperf_get_cache_write_hit(fme, &prop->data);
	case 0x5: /* WRITE_MISS */
		return fme_iperf_get_cache_write_miss(fme, &prop->data);
	case 0x6: /* HOLD_REQUEST */
		return fme_iperf_get_cache_hold_request(fme, &prop->data);
	case 0x7: /* TX_REQ_STALL */
		return fme_iperf_get_cache_tx_req_stall(fme, &prop->data);
	case 0x8: /* RX_REQ_STALL */
		return fme_iperf_get_cache_rx_req_stall(fme, &prop->data);
	case 0x9: /* RX_EVICTION */
		return fme_iperf_get_cache_rx_eviction(fme, &prop->data);
	case 0xa: /* DATA_WRITE_PORT_CONTENTION */
		return fme_iperf_get_cache_data_write_port_contention(fme,
				&prop->data);
	case 0xb: /* TAG_WRITE_PORT_CONTENTION */
		return fme_iperf_get_cache_tag_write_port_contention(fme,
				&prop->data);
	}

	return -ENOENT;
}
static int fme_iperf_vtd_root_get_prop(struct feature *feature,
				       struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x1: /* FREEZE */
		return fme_iperf_get_vtd_freeze(fme, &prop->data);
	case 0x2: /* IOTLB_4K_HIT */
		return fme_iperf_get_vtd_sip_iotlb_4k_hit(fme, &prop->data);
	case 0x3: /* IOTLB_2M_HIT */
		return fme_iperf_get_vtd_sip_iotlb_2m_hit(fme, &prop->data);
	case 0x4: /* IOTLB_1G_HIT */
		return fme_iperf_get_vtd_sip_iotlb_1g_hit(fme, &prop->data);
	case 0x5: /* SLPWC_L3_HIT */
		return fme_iperf_get_vtd_sip_slpwc_l3_hit(fme, &prop->data);
	case 0x6: /* SLPWC_L4_HIT */
		return fme_iperf_get_vtd_sip_slpwc_l4_hit(fme, &prop->data);
	case 0x7: /* RCC_HIT */
		return fme_iperf_get_vtd_sip_rcc_hit(fme, &prop->data);
	case 0x8: /* IOTLB_4K_MISS */
		return fme_iperf_get_vtd_sip_iotlb_4k_miss(fme, &prop->data);
	case 0x9: /* IOTLB_2M_MISS */
		return fme_iperf_get_vtd_sip_iotlb_2m_miss(fme, &prop->data);
	case 0xa: /* IOTLB_1G_MISS */
		return fme_iperf_get_vtd_sip_iotlb_1g_miss(fme, &prop->data);
	case 0xb: /* SLPWC_L3_MISS */
		return fme_iperf_get_vtd_sip_slpwc_l3_miss(fme, &prop->data);
	case 0xc: /* SLPWC_L4_MISS */
		return fme_iperf_get_vtd_sip_slpwc_l4_miss(fme, &prop->data);
	case 0xd: /* RCC_MISS */
		return fme_iperf_get_vtd_sip_rcc_miss(fme, &prop->data);
	}

	return -ENOENT;
}
static int fme_iperf_vtd_sub_get_prop(struct feature *feature,
				      struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);

	if (sub > PERF_MAX_PORT_NUM)
		return -ENOENT;

	switch (id) {
	case 0xe: /* READ_TRANSACTION */
		return fme_iperf_get_vtd_port_read_transaction(fme, sub,
				&prop->data);
	case 0xf: /* WRITE_TRANSACTION */
		return fme_iperf_get_vtd_port_write_transaction(fme, sub,
				&prop->data);
	case 0x10: /* DEVTLB_READ_HIT */
		return fme_iperf_get_vtd_port_devtlb_read_hit(fme, sub,
				&prop->data);
	case 0x11: /* DEVTLB_WRITE_HIT */
		return fme_iperf_get_vtd_port_devtlb_write_hit(fme, sub,
				&prop->data);
	case 0x12: /* DEVTLB_4K_FILL */
		return fme_iperf_get_vtd_port_devtlb_4k_fill(fme, sub,
				&prop->data);
	case 0x13: /* DEVTLB_2M_FILL */
		return fme_iperf_get_vtd_port_devtlb_2m_fill(fme, sub,
				&prop->data);
	case 0x14: /* DEVTLB_1G_FILL */
		return fme_iperf_get_vtd_port_devtlb_1g_fill(fme, sub,
				&prop->data);
	}

	return -ENOENT;
}
static int fme_iperf_vtd_get_prop(struct feature *feature,
				  struct feature_prop *prop)
{
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);

	if (sub == PERF_PROP_SUB_UNUSED)
		return fme_iperf_vtd_root_get_prop(feature, prop);

	return fme_iperf_vtd_sub_get_prop(feature, prop);
}
static int fme_iperf_fab_get_prop(struct feature *feature,
				  struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	/* Other properties are present for both top and sub levels */
	switch (id) {
	case 0x1: /* FREEZE */
		if (sub != PERF_PROP_SUB_UNUSED)
			return -ENOENT;
		return fme_iperf_get_fab_freeze(fme, &prop->data);
	case 0x2: /* PCIE0_READ */
		return fme_iperf_get_fab_port_pcie0_read(fme, sub,
				&prop->data);
	case 0x3: /* PCIE0_WRITE */
		return fme_iperf_get_fab_port_pcie0_write(fme, sub,
				&prop->data);
	case 0x4: /* PCIE1_READ */
		return fme_iperf_get_fab_port_pcie1_read(fme, sub,
				&prop->data);
	case 0x5: /* PCIE1_WRITE */
		return fme_iperf_get_fab_port_pcie1_write(fme, sub,
				&prop->data);
	case 0x6: /* UPI_READ */
		return fme_iperf_get_fab_port_upi_read(fme, sub,
				&prop->data);
	case 0x7: /* UPI_WRITE */
		return fme_iperf_get_fab_port_upi_write(fme, sub,
				&prop->data);
	case 0x8: /* MMIO_READ */
		return fme_iperf_get_fab_port_mmio_read(fme, sub,
				&prop->data);
	case 0x9: /* MMIO_WRITE */
		return fme_iperf_get_fab_port_mmio_write(fme, sub,
				&prop->data);
	case 0xa: /* ENABLE */
		return fme_iperf_get_fab_port_enable(fme, sub, &prop->data);
	}

	return -ENOENT;
}
static int fme_global_iperf_get_prop(struct feature *feature,
				     struct feature_prop *prop)
{
	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);

	switch (top) {
	case PERF_PROP_TOP_CACHE:
		return fme_iperf_cache_get_prop(feature, prop);
	case PERF_PROP_TOP_VTD:
		return fme_iperf_vtd_get_prop(feature, prop);
	case PERF_PROP_TOP_FAB:
		return fme_iperf_fab_get_prop(feature, prop);
	case PERF_PROP_TOP_UNUSED:
		return fme_iperf_root_get_prop(feature, prop);
	}

	return -ENOENT;
}
static int fme_iperf_cache_set_prop(struct feature *feature,
				    struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
		return fme_iperf_set_cache_freeze(fme, prop->data);

	return -ENOENT;
}
static int fme_iperf_vtd_set_prop(struct feature *feature,
				  struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
		return fme_iperf_set_vtd_freeze(fme, prop->data);

	return -ENOENT;
}
static int fme_iperf_fab_set_prop(struct feature *feature,
				  struct feature_prop *prop)
{
	struct ifpga_fme_hw *fme = feature->parent;
	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
	u16 id = GET_FIELD(PROP_ID, prop->prop_id);

	switch (id) {
	case 0x1: /* FREEZE */
		if (sub != PERF_PROP_SUB_UNUSED)
			return -ENOENT;
		return fme_iperf_set_fab_freeze(fme, prop->data);
	case 0xa: /* ENABLE */
		return fme_iperf_set_fab_port_enable(fme, sub, prop->data);
	}

	return -ENOENT;
}
static int fme_global_iperf_set_prop(struct feature *feature,
				     struct feature_prop *prop)
{
	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);

	switch (top) {
	case PERF_PROP_TOP_CACHE:
		return fme_iperf_cache_set_prop(feature, prop);
	case PERF_PROP_TOP_VTD:
		return fme_iperf_vtd_set_prop(feature, prop);
	case PERF_PROP_TOP_FAB:
		return fme_iperf_fab_set_prop(feature, prop);
	}

	return -ENOENT;
}
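/* Feature ops registered for the FME global iperf private feature. */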
struct feature_ops fme_global_iperf_ops = {
	.init = fme_global_iperf_init,
	.uinit = fme_global_iperf_uinit,
	.get_prop = fme_global_iperf_get_prop,
	.set_prop = fme_global_iperf_set_prop,
};