/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */
5 #include "ifpga_feature_dev.h"
7 #define PERF_OBJ_ROOT_ID 0xff
9 static int fme_dperf_get_clock(struct ifpga_fme_hw
*fme
, u64
*clock
)
11 struct feature_fme_dperf
*dperf
;
12 struct feature_fme_dfpmon_clk_ctr clk
;
14 dperf
= get_fme_feature_ioaddr_by_index(fme
,
15 FME_FEATURE_ID_GLOBAL_DPERF
);
16 clk
.afu_interf_clock
= readq(&dperf
->clk
);
18 *clock
= clk
.afu_interf_clock
;
22 static int fme_dperf_get_revision(struct ifpga_fme_hw
*fme
, u64
*revision
)
24 struct feature_fme_dperf
*dperf
;
25 struct feature_header header
;
27 dperf
= get_fme_feature_ioaddr_by_index(fme
,
28 FME_FEATURE_ID_GLOBAL_DPERF
);
29 header
.csr
= readq(&dperf
->header
);
30 *revision
= header
.revision
;
35 #define DPERF_TIMEOUT 30
37 static bool fabric_pobj_is_enabled(int port_id
,
38 struct feature_fme_dperf
*dperf
)
40 struct feature_fme_dfpmon_fab_ctl ctl
;
42 ctl
.csr
= readq(&dperf
->fab_ctl
);
44 if (ctl
.port_filter
== FAB_DISABLE_FILTER
)
45 return port_id
== PERF_OBJ_ROOT_ID
;
47 return port_id
== ctl
.port_id
;
50 static u64
read_fabric_counter(struct ifpga_fme_hw
*fme
, u8 port_id
,
51 enum dperf_fab_events fab_event
)
53 struct feature_fme_dfpmon_fab_ctl ctl
;
54 struct feature_fme_dfpmon_fab_ctr ctr
;
55 struct feature_fme_dperf
*dperf
;
58 spinlock_lock(&fme
->lock
);
59 dperf
= get_fme_feature_ioaddr_by_index(fme
,
60 FME_FEATURE_ID_GLOBAL_DPERF
);
62 /* if it is disabled, force the counter to return zero. */
63 if (!fabric_pobj_is_enabled(port_id
, dperf
))
66 ctl
.csr
= readq(&dperf
->fab_ctl
);
67 ctl
.fab_evtcode
= fab_event
;
68 writeq(ctl
.csr
, &dperf
->fab_ctl
);
70 ctr
.event_code
= fab_event
;
72 if (fpga_wait_register_field(event_code
, ctr
,
73 &dperf
->fab_ctr
, DPERF_TIMEOUT
, 1)) {
74 dev_err(fme
, "timeout, unmatched VTd event type in counter registers.\n");
75 spinlock_unlock(&fme
->lock
);
79 ctr
.csr
= readq(&dperf
->fab_ctr
);
80 counter
= ctr
.fab_cnt
;
82 spinlock_unlock(&fme
->lock
);
/*
 * Generate a getter for one fabric port counter: reads the given
 * fabric event via read_fabric_counter() and stores it in *counter.
 */
#define FAB_PORT_SHOW(name, event)					\
static int fme_dperf_get_fab_port_##name(struct ifpga_fme_hw *fme,	\
					 u8 port_id, u64 *counter)	\
{									\
	*counter = read_fabric_counter(fme, port_id, event);		\
	return 0;							\
}
/* Per-event fabric counter getters. */
FAB_PORT_SHOW(pcie0_read, DPERF_FAB_PCIE0_RD);
FAB_PORT_SHOW(pcie0_write, DPERF_FAB_PCIE0_WR);
FAB_PORT_SHOW(mmio_read, DPERF_FAB_MMIO_RD);
FAB_PORT_SHOW(mmio_write, DPERF_FAB_MMIO_WR);
99 static int fme_dperf_get_fab_port_enable(struct ifpga_fme_hw
*fme
,
100 u8 port_id
, u64
*enable
)
102 struct feature_fme_dperf
*dperf
;
105 dperf
= get_fme_feature_ioaddr_by_index(fme
,
106 FME_FEATURE_ID_GLOBAL_DPERF
);
108 status
= fabric_pobj_is_enabled(port_id
, dperf
);
109 *enable
= (u64
)status
;
115 * If enable one port or all port event counter in fabric, other
116 * fabric event counter originally enabled will be disable automatically.
118 static int fme_dperf_set_fab_port_enable(struct ifpga_fme_hw
*fme
,
119 u8 port_id
, u64 enable
)
121 struct feature_fme_dfpmon_fab_ctl ctl
;
122 struct feature_fme_dperf
*dperf
;
130 dperf
= get_fme_feature_ioaddr_by_index(fme
,
131 FME_FEATURE_ID_GLOBAL_DPERF
);
133 /* if it is already enabled. */
134 if (fabric_pobj_is_enabled(port_id
, dperf
))
137 spinlock_lock(&fme
->lock
);
138 ctl
.csr
= readq(&dperf
->fab_ctl
);
139 if (port_id
== PERF_OBJ_ROOT_ID
) {
140 ctl
.port_filter
= FAB_DISABLE_FILTER
;
142 ctl
.port_filter
= FAB_ENABLE_FILTER
;
143 ctl
.port_id
= port_id
;
146 writeq(ctl
.csr
, &dperf
->fab_ctl
);
147 spinlock_unlock(&fme
->lock
);
152 static int fme_dperf_get_fab_freeze(struct ifpga_fme_hw
*fme
, u64
*freeze
)
154 struct feature_fme_dperf
*dperf
;
155 struct feature_fme_dfpmon_fab_ctl ctl
;
157 dperf
= get_fme_feature_ioaddr_by_index(fme
,
158 FME_FEATURE_ID_GLOBAL_DPERF
);
159 ctl
.csr
= readq(&dperf
->fab_ctl
);
160 *freeze
= (u64
)ctl
.freeze
;
165 static int fme_dperf_set_fab_freeze(struct ifpga_fme_hw
*fme
, u64 freeze
)
167 struct feature_fme_dperf
*dperf
;
168 struct feature_fme_dfpmon_fab_ctl ctl
;
173 spinlock_lock(&fme
->lock
);
174 dperf
= get_fme_feature_ioaddr_by_index(fme
,
175 FME_FEATURE_ID_GLOBAL_DPERF
);
176 ctl
.csr
= readq(&dperf
->fab_ctl
);
178 writeq(ctl
.csr
, &dperf
->fab_ctl
);
179 spinlock_unlock(&fme
->lock
);
184 #define PERF_MAX_PORT_NUM 1
186 static int fme_global_dperf_init(struct ifpga_feature
*feature
)
190 dev_info(NULL
, "FME global_dperf Init.\n");
195 static void fme_global_dperf_uinit(struct ifpga_feature
*feature
)
199 dev_info(NULL
, "FME global_dperf UInit.\n");
202 static int fme_dperf_fab_get_prop(struct ifpga_feature
*feature
,
203 struct feature_prop
*prop
)
205 struct ifpga_fme_hw
*fme
= feature
->parent
;
206 u8 sub
= GET_FIELD(PROP_SUB
, prop
->prop_id
);
207 u16 id
= GET_FIELD(PROP_ID
, prop
->prop_id
);
210 case 0x1: /* FREEZE */
211 return fme_dperf_get_fab_freeze(fme
, &prop
->data
);
212 case 0x2: /* PCIE0_READ */
213 return fme_dperf_get_fab_port_pcie0_read(fme
, sub
, &prop
->data
);
214 case 0x3: /* PCIE0_WRITE */
215 return fme_dperf_get_fab_port_pcie0_write(fme
, sub
,
217 case 0x4: /* MMIO_READ */
218 return fme_dperf_get_fab_port_mmio_read(fme
, sub
, &prop
->data
);
219 case 0x5: /* MMIO_WRITE */
220 return fme_dperf_get_fab_port_mmio_write(fme
, sub
, &prop
->data
);
221 case 0x6: /* ENABLE */
222 return fme_dperf_get_fab_port_enable(fme
, sub
, &prop
->data
);
228 static int fme_dperf_root_get_prop(struct ifpga_feature
*feature
,
229 struct feature_prop
*prop
)
231 struct ifpga_fme_hw
*fme
= feature
->parent
;
232 u8 sub
= GET_FIELD(PROP_SUB
, prop
->prop_id
);
233 u16 id
= GET_FIELD(PROP_ID
, prop
->prop_id
);
235 if (sub
!= PERF_PROP_SUB_UNUSED
)
239 case 0x1: /* CLOCK */
240 return fme_dperf_get_clock(fme
, &prop
->data
);
241 case 0x2: /* REVISION */
242 return fme_dperf_get_revision(fme
, &prop
->data
);
248 static int fme_global_dperf_get_prop(struct ifpga_feature
*feature
,
249 struct feature_prop
*prop
)
251 u8 top
= GET_FIELD(PROP_TOP
, prop
->prop_id
);
254 case PERF_PROP_TOP_FAB
:
255 return fme_dperf_fab_get_prop(feature
, prop
);
256 case PERF_PROP_TOP_UNUSED
:
257 return fme_dperf_root_get_prop(feature
, prop
);
263 static int fme_dperf_fab_set_prop(struct ifpga_feature
*feature
,
264 struct feature_prop
*prop
)
266 struct ifpga_fme_hw
*fme
= feature
->parent
;
267 u8 sub
= GET_FIELD(PROP_SUB
, prop
->prop_id
);
268 u16 id
= GET_FIELD(PROP_ID
, prop
->prop_id
);
271 case 0x1: /* FREEZE - fab root only prop */
272 if (sub
!= PERF_PROP_SUB_UNUSED
)
274 return fme_dperf_set_fab_freeze(fme
, prop
->data
);
275 case 0x6: /* ENABLE - fab both root and sub */
276 return fme_dperf_set_fab_port_enable(fme
, sub
, prop
->data
);
282 static int fme_global_dperf_set_prop(struct ifpga_feature
*feature
,
283 struct feature_prop
*prop
)
285 u8 top
= GET_FIELD(PROP_TOP
, prop
->prop_id
);
288 case PERF_PROP_TOP_FAB
:
289 return fme_dperf_fab_set_prop(feature
, prop
);
295 struct ifpga_feature_ops fme_global_dperf_ops
= {
296 .init
= fme_global_dperf_init
,
297 .uinit
= fme_global_dperf_uinit
,
298 .get_prop
= fme_global_dperf_get_prop
,
299 .set_prop
= fme_global_dperf_set_prop
,