/*
 * Copyright 2011,2016 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#define MMDC_MAPSR 0x404
#define BP_MMDC_MAPSR_PSD 0
#define BP_MMDC_MAPSR_PSS 4

#define MMDC_MDMISC 0x18
#define BM_MMDC_MDMISC_DDR_TYPE 0x18
#define BP_MMDC_MDMISC_DDR_TYPE 0x3
#define TOTAL_CYCLES 0x0
#define BUSY_CYCLES 0x1
#define READ_ACCESSES 0x2
#define WRITE_ACCESSES 0x3
#define READ_BYTES 0x4
#define WRITE_BYTES 0x5
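
/*
 * Each of the six event numbers above selects one fixed-purpose profiling
 * counter; mmdc_pmu_read_counter() below maps them onto MADPSR0..MADPSR5.
 */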

/* Enables, resets, freezes, overflow profiling */
#define DBG_EN 0x1
#define DBG_RST 0x2
#define PRF_FRZ 0x4
#define PROFILE_SEL 0x10

#define MMDC_MADPCR0 0x410
#define MMDC_MADPSR0 0x418
#define MMDC_MADPSR1 0x41C
#define MMDC_MADPSR2 0x420
#define MMDC_MADPSR3 0x424
#define MMDC_MADPSR4 0x428
#define MMDC_MADPSR5 0x42C

#define MMDC_NUM_COUNTERS 6

#define MMDC_FLAG_PROFILE_SEL 0x1

#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)

static int ddr_type;

struct fsl_mmdc_devtype_data {
        unsigned int flags;
};

static const struct fsl_mmdc_devtype_data imx6q_data = {
};

static const struct fsl_mmdc_devtype_data imx6qp_data = {
        .flags = MMDC_FLAG_PROFILE_SEL,
};
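
/*
 * MMDC_FLAG_PROFILE_SEL marks parts (i.MX6QP) whose counters also need the
 * PROFILE_SEL bit set when profiling is enabled; see mmdc_pmu_event_start().
 */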

static const struct of_device_id imx_mmdc_dt_ids[] = {
        { .compatible = "fsl,imx6q-mmdc", .data = (void *)&imx6q_data},
        { .compatible = "fsl,imx6qp-mmdc", .data = (void *)&imx6qp_data},
        { /* sentinel */ }
};

#ifdef CONFIG_PERF_EVENTS

static DEFINE_IDA(mmdc_ida);

PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05")
PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001");

struct mmdc_pmu {
        struct pmu pmu;
        void __iomem *mmdc_base;
        cpumask_t cpu;
        struct hrtimer hrtimer;
        unsigned int active_events;
        struct device *dev;
        struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
        struct hlist_node node;
        struct fsl_mmdc_devtype_data *devtype_data;
};

/*
 * Polling period is set to one second, overflow of total-cycles (the fastest
 * increasing counter) takes ten seconds so one second is safe
 */
static unsigned int mmdc_pmu_poll_period_us = 1000000;

module_param_named(pmu_pmu_poll_period_us, mmdc_pmu_poll_period_us, uint,
                S_IRUGO | S_IWUSR);

static ktime_t mmdc_pmu_timer_period(void)
{
        return ns_to_ktime((u64)mmdc_pmu_poll_period_us * 1000);
}

static ssize_t mmdc_pmu_cpumask_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mmdc_pmu *pmu_mmdc = dev_get_drvdata(dev);

        return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu);
}

static struct device_attribute mmdc_pmu_cpumask_attr =
        __ATTR(cpumask, S_IRUGO, mmdc_pmu_cpumask_show, NULL);

static struct attribute *mmdc_pmu_cpumask_attrs[] = {
        &mmdc_pmu_cpumask_attr.attr,
        NULL,
};

static struct attribute_group mmdc_pmu_cpumask_attr_group = {
        .attrs = mmdc_pmu_cpumask_attrs,
};

static struct attribute *mmdc_pmu_events_attrs[] = {
        &mmdc_pmu_total_cycles.attr.attr,
        &mmdc_pmu_busy_cycles.attr.attr,
        &mmdc_pmu_read_accesses.attr.attr,
        &mmdc_pmu_write_accesses.attr.attr,
        &mmdc_pmu_read_bytes.attr.attr,
        &mmdc_pmu_read_bytes_unit.attr.attr,
        &mmdc_pmu_read_bytes_scale.attr.attr,
        &mmdc_pmu_write_bytes.attr.attr,
        &mmdc_pmu_write_bytes_unit.attr.attr,
        &mmdc_pmu_write_bytes_scale.attr.attr,
        NULL,
};

static struct attribute_group mmdc_pmu_events_attr_group = {
        .name = "events",
        .attrs = mmdc_pmu_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *mmdc_pmu_format_attrs[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group mmdc_pmu_format_attr_group = {
        .name = "format",
        .attrs = mmdc_pmu_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
        &mmdc_pmu_events_attr_group,
        &mmdc_pmu_format_attr_group,
        &mmdc_pmu_cpumask_attr_group,
        NULL,
};
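
/*
 * These groups are exported by perf_pmu_register() under
 * /sys/bus/event_source/devices/<pmu>/ (events, format and cpumask).
 */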

static u32 mmdc_pmu_read_counter(struct mmdc_pmu *pmu_mmdc, int cfg)
{
        void __iomem *mmdc_base, *reg;

        mmdc_base = pmu_mmdc->mmdc_base;

        switch (cfg) {
        case TOTAL_CYCLES:
                reg = mmdc_base + MMDC_MADPSR0;
                break;
        case BUSY_CYCLES:
                reg = mmdc_base + MMDC_MADPSR1;
                break;
        case READ_ACCESSES:
                reg = mmdc_base + MMDC_MADPSR2;
                break;
        case WRITE_ACCESSES:
                reg = mmdc_base + MMDC_MADPSR3;
                break;
        case READ_BYTES:
                reg = mmdc_base + MMDC_MADPSR4;
                break;
        case WRITE_BYTES:
                reg = mmdc_base + MMDC_MADPSR5;
                break;
        default:
                return WARN_ONCE(1,
                        "invalid configuration %d for mmdc counter", cfg);
        }
        return readl(reg);
}
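
/*
 * The counters are shared, uncore-style resources: all events run on one
 * CPU. If that CPU goes offline, migrate the perf context to another online
 * CPU and keep counting there.
 */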

static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu, node);
        int target;

        if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
                return 0;

        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                return 0;

        perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
        cpumask_set_cpu(target, &pmu_mmdc->cpu);

        return 0;
}

static bool mmdc_pmu_group_event_is_valid(struct perf_event *event,
                                          struct pmu *pmu,
                                          unsigned long *used_counters)
{
        int cfg = event->attr.config;

        if (is_software_event(event))
                return true;

        if (event->pmu != pmu)
                return false;

        return !test_and_set_bit(cfg, used_counters);
}

/*
 * Each event has a single fixed-purpose counter, so we can only have a
 * single active event for each at any point in time. Here we just check
 * for duplicates, and rely on mmdc_pmu_event_init to verify that the HW
 * event numbers are valid.
 */

static bool mmdc_pmu_group_is_valid(struct perf_event *event)
{
        struct pmu *pmu = event->pmu;
        struct perf_event *leader = event->group_leader;
        struct perf_event *sibling;
        unsigned long counter_mask = 0;

        set_bit(leader->attr.config, &counter_mask);

        if (event != leader) {
                if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask))
                        return false;
        }

        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
                        return false;
        }

        return true;
}

static int mmdc_pmu_event_init(struct perf_event *event)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        int cfg = event->attr.config;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EOPNOTSUPP;

        if (event->cpu < 0) {
                dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n");
                return -EOPNOTSUPP;
        }

        if (event->attr.exclude_user            ||
                        event->attr.exclude_kernel      ||
                        event->attr.exclude_hv          ||
                        event->attr.exclude_idle        ||
                        event->attr.exclude_host        ||
                        event->attr.exclude_guest       ||
                        event->attr.sample_period)
                return -EINVAL;

        if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS)
                return -EINVAL;

        if (!mmdc_pmu_group_is_valid(event))
                return -EINVAL;

        event->cpu = cpumask_first(&pmu_mmdc->cpu);
        return 0;
}
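
/*
 * The hardware counters are free-running and 32 bits wide: snapshot the
 * current value, publish it via cmpxchg so concurrent updates agree on
 * prev_count, and accumulate the 32-bit delta into the 64-bit perf count.
 */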

static void mmdc_pmu_event_update(struct perf_event *event)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;

        do {
                prev_raw_count = local64_read(&hwc->prev_count);
                new_raw_count = mmdc_pmu_read_counter(pmu_mmdc,
                                                      event->attr.config);
        } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                new_raw_count) != prev_raw_count);

        delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

        local64_add(delta, &event->count);
}

static void mmdc_pmu_event_start(struct perf_event *event, int flags)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        void __iomem *mmdc_base, *reg;
        u32 val;

        mmdc_base = pmu_mmdc->mmdc_base;
        reg = mmdc_base + MMDC_MADPCR0;

        /*
         * hrtimer is required because mmdc does not provide an interrupt so
         * polling is necessary
         */
        hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(),
                        HRTIMER_MODE_REL_PINNED);

        local64_set(&hwc->prev_count, 0);

        writel(DBG_RST, reg);

        val = DBG_EN;
        if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL)
                val |= PROFILE_SEL;

        writel(val, reg);
}
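
/*
 * add() claims the fixed-purpose counter slot for this event and records the
 * current hardware value as the baseline for later deltas.
 */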

static int mmdc_pmu_event_add(struct perf_event *event, int flags)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        int cfg = event->attr.config;

        if (flags & PERF_EF_START)
                mmdc_pmu_event_start(event, flags);

        if (pmu_mmdc->mmdc_events[cfg] != NULL)
                return -EAGAIN;

        pmu_mmdc->mmdc_events[cfg] = event;
        pmu_mmdc->active_events++;

        local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg));

        return 0;
}
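
/*
 * Freeze profiling (PRF_FRZ) before the final read so the counter is stable
 * while mmdc_pmu_event_update() folds it into the event count.
 */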

static void mmdc_pmu_event_stop(struct perf_event *event, int flags)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        void __iomem *mmdc_base, *reg;

        mmdc_base = pmu_mmdc->mmdc_base;
        reg = mmdc_base + MMDC_MADPCR0;

        writel(PRF_FRZ, reg);
        mmdc_pmu_event_update(event);
}

static void mmdc_pmu_event_del(struct perf_event *event, int flags)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        int cfg = event->attr.config;

        pmu_mmdc->mmdc_events[cfg] = NULL;
        pmu_mmdc->active_events--;

        if (pmu_mmdc->active_events == 0)
                hrtimer_cancel(&pmu_mmdc->hrtimer);

        mmdc_pmu_event_stop(event, PERF_EF_UPDATE);
}

static void mmdc_pmu_overflow_handler(struct mmdc_pmu *pmu_mmdc)
{
        int i;

        for (i = 0; i < MMDC_NUM_COUNTERS; i++) {
                struct perf_event *event = pmu_mmdc->mmdc_events[i];

                if (event)
                        mmdc_pmu_event_update(event);
        }
}

static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
{
        struct mmdc_pmu *pmu_mmdc = container_of(hrtimer, struct mmdc_pmu,
                        hrtimer);

        mmdc_pmu_overflow_handler(pmu_mmdc);
        hrtimer_forward_now(hrtimer, mmdc_pmu_timer_period());

        return HRTIMER_RESTART;
}
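
/*
 * The periodic timer callback folds all active counters into perf well
 * before a 32-bit counter can wrap (see the polling period comment above).
 */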

static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
                void __iomem *mmdc_base, struct device *dev)
{
        int mmdc_num;

        *pmu_mmdc = (struct mmdc_pmu) {
                .pmu = (struct pmu) {
                        .task_ctx_nr    = perf_invalid_context,
                        .attr_groups    = attr_groups,
                        .event_init     = mmdc_pmu_event_init,
                        .add            = mmdc_pmu_event_add,
                        .del            = mmdc_pmu_event_del,
                        .start          = mmdc_pmu_event_start,
                        .stop           = mmdc_pmu_event_stop,
                        .read           = mmdc_pmu_event_update,
                },
                .mmdc_base = mmdc_base,
                .dev = dev,
                .active_events = 0,
        };

        mmdc_num = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);

        return mmdc_num;
}
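
/*
 * Tear-down mirrors imx_mmdc_perf_init() below: unregister the PMU, drop the
 * hotplug state and free the per-instance data.
 */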

static int imx_mmdc_remove(struct platform_device *pdev)
{
        struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);

        perf_pmu_unregister(&pmu_mmdc->pmu);
        cpuhp_remove_state_nocalls(CPUHP_ONLINE);
        kfree(pmu_mmdc);
        return 0;
}

static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base)
{
        struct mmdc_pmu *pmu_mmdc;
        char *name;
        int mmdc_num;
        int ret;
        const struct of_device_id *of_id =
                of_match_device(imx_mmdc_dt_ids, &pdev->dev);

        pmu_mmdc = kzalloc(sizeof(*pmu_mmdc), GFP_KERNEL);
        if (!pmu_mmdc) {
                pr_err("failed to allocate PMU device!\n");
                return -ENOMEM;
        }

        mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
        name = devm_kasprintf(&pdev->dev,
                        GFP_KERNEL, "mmdc%d", mmdc_num);

        pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;

        hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
                        HRTIMER_MODE_REL);
        pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;

        cpuhp_state_add_instance_nocalls(CPUHP_ONLINE,
                        &pmu_mmdc->node);
        cpumask_set_cpu(smp_processor_id(), &pmu_mmdc->cpu);
        ret = cpuhp_setup_state_multi(CPUHP_AP_NOTIFY_ONLINE,
                        "MMDC_ONLINE", NULL,
                        mmdc_pmu_offline_cpu);
        if (ret < 0) {
                pr_err("cpuhp_setup_state_multi failure\n");
                goto pmu_register_err;
        }

        ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
        platform_set_drvdata(pdev, pmu_mmdc);
        if (ret)
                goto pmu_register_err;

        return 0;

pmu_register_err:
        pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
        hrtimer_cancel(&pmu_mmdc->hrtimer);
        kfree(pmu_mmdc);
        return ret;
}

#else
#define imx_mmdc_remove NULL
#define imx_mmdc_perf_init(pdev, mmdc_base) 0
#endif

static int imx_mmdc_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        void __iomem *mmdc_base, *reg;
        u32 val;
        int timeout = 0x400;

        mmdc_base = of_iomap(np, 0);
        WARN_ON(!mmdc_base);

        reg = mmdc_base + MMDC_MDMISC;
        /* Get ddr type */
        val = readl_relaxed(reg);
        ddr_type = (val & BM_MMDC_MDMISC_DDR_TYPE) >>
                 BP_MMDC_MDMISC_DDR_TYPE;

        reg = mmdc_base + MMDC_MAPSR;

        /* Enable automatic power saving */
        val = readl_relaxed(reg);
        val &= ~(1 << BP_MMDC_MAPSR_PSD);
        writel_relaxed(val, reg);

        /* Ensure it's successfully enabled */
        while (!(readl_relaxed(reg) & 1 << BP_MMDC_MAPSR_PSS) && --timeout)
                cpu_relax();

        if (unlikely(!timeout)) {
                pr_warn("%s: failed to enable automatic power saving\n",
                        __func__);
                return -EBUSY;
        }

        return imx_mmdc_perf_init(pdev, mmdc_base);
}
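
/*
 * Exported (non-static) so other i.MX6 code can query the DDR type latched
 * from MDMISC in imx_mmdc_probe().
 */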
int imx_mmdc_get_ddr_type(void)
{
        return ddr_type;
}

static struct platform_driver imx_mmdc_driver = {
        .driver = {
                .name = "imx-mmdc",
                .of_match_table = imx_mmdc_dt_ids,
        },
        .probe = imx_mmdc_probe,
        .remove = imx_mmdc_remove,
};

static int __init imx_mmdc_init(void)
{
        return platform_driver_register(&imx_mmdc_driver);
}
postcore_initcall(imx_mmdc_init);