// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm-core.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"

static int dm_stat_need_rcu_barrier;

/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
struct dm_stat_percpu {
	unsigned long long sectors[2];
	unsigned long long ios[2];
	unsigned long long merges[2];
	unsigned long long ticks[2];
	unsigned long long io_ticks[2];
	unsigned long long io_ticks_total;
	unsigned long long time_in_queue;
	unsigned long long *histogram;
};
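
/* The two-element arrays above are indexed by direction: READ (0) and WRITE (1). */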

struct dm_stat_shared {
	atomic_t in_flight[2];
	unsigned long long stamp;
	struct dm_stat_percpu tmp;
};

struct dm_stat {
	struct list_head list_entry;
	int id;
	unsigned stat_flags;
	size_t n_entries;
	sector_t start;
	sector_t end;
	sector_t step;
	unsigned n_histogram_entries;
	unsigned long long *histogram_boundaries;
	const char *program_id;
	const char *aux_data;
	struct rcu_head rcu_head;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *stat_percpu[NR_CPUS];
	struct dm_stat_shared stat_shared[];
};
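
/*
 * Layout note: one struct dm_stat is allocated per region. stat_shared[] is a
 * flexible array with one entry per step-sized area, and stat_percpu[cpu]
 * points to a per-CPU array of counters with the same number of entries, so
 * hot-path updates only touch the local CPU's slot.
 */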

#define STAT_PRECISE_TIMESTAMPS 1

struct dm_stats_last_position {
	sector_t last_sector;
	unsigned last_rw;
};

/*
 * A typo on the command line could possibly make the kernel run out of memory
 * and crash. To prevent the crash we account all used memory. We fail if we
 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
 */
#define DM_STATS_MEMORY_FACTOR		4
#define DM_STATS_VMALLOC_FACTOR		2
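
/*
 * For example, on a machine with 4 GiB of RAM, allocations are refused once
 * the accounted total would exceed 1 GiB (1/4 of RAM); with a 128 MiB vmalloc
 * arena, the effective cap would instead be 64 MiB (1/2 of the arena),
 * whichever limit is hit first.
 */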

static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;

static bool __check_shared_memory(size_t alloc_size)
{
	size_t a;

	a = shared_memory_amount + alloc_size;
	if (a < shared_memory_amount)
		return false;
	if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR)
		return false;
#ifdef CONFIG_MMU
	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
		return false;
#endif
	return true;
}

static bool check_shared_memory(size_t alloc_size)
{
	bool ret;

	spin_lock_irq(&shared_memory_lock);

	ret = __check_shared_memory(alloc_size);

	spin_unlock_irq(&shared_memory_lock);

	return ret;
}

static bool claim_shared_memory(size_t alloc_size)
{
	spin_lock_irq(&shared_memory_lock);

	if (!__check_shared_memory(alloc_size)) {
		spin_unlock_irq(&shared_memory_lock);
		return false;
	}

	shared_memory_amount += alloc_size;

	spin_unlock_irq(&shared_memory_lock);

	return true;
}
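
/*
 * check_shared_memory() is only an advisory pre-check (the lock is dropped
 * before anything is allocated); claim_shared_memory() is the authoritative
 * reservation, re-testing the limit and updating shared_memory_amount inside
 * one critical section.
 */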

static void free_shared_memory(size_t alloc_size)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_memory_lock, flags);

	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
		spin_unlock_irqrestore(&shared_memory_lock, flags);
		DMCRIT("Memory usage accounting bug.");
		return;
	}

	shared_memory_amount -= alloc_size;

	spin_unlock_irqrestore(&shared_memory_lock, flags);
}

static void *dm_kvzalloc(size_t alloc_size, int node)
{
	void *p;

	if (!claim_shared_memory(alloc_size))
		return NULL;

	p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
	if (p)
		return p;

	free_shared_memory(alloc_size);

	return NULL;
}

static void dm_kvfree(void *ptr, size_t alloc_size)
{
	if (!ptr)
		return;

	free_shared_memory(alloc_size);

	kvfree(ptr);
}
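
/*
 * dm_kvzalloc()/dm_kvfree() must be paired with the same alloc_size: the
 * accounting is by size, not by pointer, so a mismatched size would silently
 * corrupt shared_memory_amount.
 */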

static void dm_stat_free(struct rcu_head *head)
{
	int cpu;
	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

	kfree(s->histogram_boundaries);
	kfree(s->program_id);
	kfree(s->aux_data);
	for_each_possible_cpu(cpu) {
		dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
	}
	dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
	dm_kvfree(s, s->shared_alloc_size);
}

static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
	return atomic_read(&shared->in_flight[READ]) +
	       atomic_read(&shared->in_flight[WRITE]);
}

void dm_stats_init(struct dm_stats *stats)
{
	int cpu;
	struct dm_stats_last_position *last;

	mutex_init(&stats->mutex);
	INIT_LIST_HEAD(&stats->list);
	stats->precise_timestamps = false;
	stats->last = alloc_percpu(struct dm_stats_last_position);
	for_each_possible_cpu(cpu) {
		last = per_cpu_ptr(stats->last, cpu);
		last->last_sector = (sector_t)ULLONG_MAX;
		last->last_rw = UINT_MAX;
	}
}
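
/*
 * ULLONG_MAX/UINT_MAX above are sentinels that cannot match a real request,
 * so the first bio seen on each CPU is never considered a merge candidate.
 */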

void dm_stats_cleanup(struct dm_stats *stats)
{
	size_t ni;
	struct dm_stat *s;
	struct dm_stat_shared *shared;

	while (!list_empty(&stats->list)) {
		s = container_of(stats->list.next, struct dm_stat, list_entry);
		list_del(&s->list_entry);
		for (ni = 0; ni < s->n_entries; ni++) {
			shared = &s->stat_shared[ni];
			if (WARN_ON(dm_stat_in_flight(shared))) {
				DMCRIT("leaked in-flight counter at index %lu "
				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
				       (unsigned long)ni,
				       (unsigned long long)s->start,
				       (unsigned long long)s->end,
				       (unsigned long long)s->step,
				       atomic_read(&shared->in_flight[READ]),
				       atomic_read(&shared->in_flight[WRITE]));
			}
		}
		dm_stat_free(&s->rcu_head);
	}
	free_percpu(stats->last);
	mutex_destroy(&stats->mutex);
}

static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats)
{
	struct list_head *l;
	struct dm_stat *tmp_s;
	bool precise_timestamps = false;

	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (tmp_s->stat_flags & STAT_PRECISE_TIMESTAMPS) {
			precise_timestamps = true;
			break;
		}
	}
	stats->precise_timestamps = precise_timestamps;
}
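
/* The caller must hold stats->mutex while the region list is walked above. */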

static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
			   sector_t step, unsigned stat_flags,
			   unsigned n_histogram_entries,
			   unsigned long long *histogram_boundaries,
			   const char *program_id, const char *aux_data,
			   void (*suspend_callback)(struct mapped_device *),
			   void (*resume_callback)(struct mapped_device *),
			   struct mapped_device *md)
{
	struct list_head *l;
	struct dm_stat *s, *tmp_s;
	sector_t n_entries;
	size_t ni;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *p;
	int cpu;
	int ret_id;
	int r;

	if (end < start || !step)
		return -EINVAL;

	n_entries = end - start;
	if (dm_sector_div64(n_entries, step))
		n_entries++;

	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
		return -EOVERFLOW;

	shared_alloc_size = struct_size(s, stat_shared, n_entries);
	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
		return -EOVERFLOW;

	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
		return -EOVERFLOW;

	histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
	if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
		return -EOVERFLOW;
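
	/*
	 * The divisions above undo the multiplications: if a product
	 * overflowed size_t, dividing it back no longer yields the original
	 * operand, which is how each size computation is validated without a
	 * widening type.
	 */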

	if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
				 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
		return -ENOMEM;

	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
	if (!s)
		return -ENOMEM;

	s->stat_flags = stat_flags;
	s->n_entries = n_entries;
	s->start = start;
	s->end = end;
	s->step = step;
	s->shared_alloc_size = shared_alloc_size;
	s->percpu_alloc_size = percpu_alloc_size;
	s->histogram_alloc_size = histogram_alloc_size;

	s->n_histogram_entries = n_histogram_entries;
	s->histogram_boundaries = kmemdup(histogram_boundaries,
					  s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
	if (!s->histogram_boundaries) {
		r = -ENOMEM;
		goto out;
	}

	s->program_id = kstrdup(program_id, GFP_KERNEL);
	if (!s->program_id) {
		r = -ENOMEM;
		goto out;
	}
	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!s->aux_data) {
		r = -ENOMEM;
		goto out;
	}

	for (ni = 0; ni < n_entries; ni++) {
		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
	}

	if (s->n_histogram_entries) {
		unsigned long long *hi;
		hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
		if (!hi) {
			r = -ENOMEM;
			goto out;
		}
		for (ni = 0; ni < n_entries; ni++) {
			s->stat_shared[ni].tmp.histogram = hi;
			hi += s->n_histogram_entries + 1;
		}
	}
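
	/*
	 * The histogram counters for all areas are carved out of one big
	 * allocation: each area gets a window of n_histogram_entries + 1
	 * slots (n boundaries delimit n + 1 buckets), so 'hi' advances by
	 * that stride per area.
	 */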

	for_each_possible_cpu(cpu) {
		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
		if (!p) {
			r = -ENOMEM;
			goto out;
		}
		s->stat_percpu[cpu] = p;
		if (s->n_histogram_entries) {
			unsigned long long *hi;
			hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
			if (!hi) {
				r = -ENOMEM;
				goto out;
			}
			for (ni = 0; ni < n_entries; ni++) {
				p[ni].histogram = hi;
				hi += s->n_histogram_entries + 1;
			}
		}
	}

	/*
	 * Suspend/resume to make sure there is no i/o in flight,
	 * so that newly created statistics will be exact.
	 *
	 * (note: we couldn't suspend earlier because we must not
	 * allocate memory while suspended)
	 */
	suspend_callback(md);

	mutex_lock(&stats->mutex);
	s->id = 0;
	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (WARN_ON(tmp_s->id < s->id)) {
			r = -EINVAL;
			goto out_unlock_resume;
		}
		if (tmp_s->id > s->id)
			break;
		if (unlikely(s->id == INT_MAX)) {
			r = -ENFILE;
			goto out_unlock_resume;
		}
		s->id++;
	}
	ret_id = s->id;
	list_add_tail_rcu(&s->list_entry, l);

	dm_stats_recalc_precise_timestamps(stats);

	mutex_unlock(&stats->mutex);

	resume_callback(md);

	return ret_id;

out_unlock_resume:
	mutex_unlock(&stats->mutex);
	resume_callback(md);
out:
	dm_stat_free(&s->rcu_head);
	return r;
}
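
/*
 * The id-assignment loop in dm_stats_create() walks the list (kept sorted by
 * id) and picks the lowest unused id; list_add_tail_rcu(&s->list_entry, l)
 * then inserts the new region just before the first entry with a larger id,
 * preserving the ordering.
 */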

static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	list_for_each_entry(s, &stats->list, list_entry) {
		if (s->id > id)
			break;
		if (s->id == id)
			return s;
	}

	return NULL;
}

static int dm_stats_delete(struct dm_stats *stats, int id)
{
	struct dm_stat *s;
	int cpu;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	list_del_rcu(&s->list_entry);

	dm_stats_recalc_precise_timestamps(stats);

	mutex_unlock(&stats->mutex);

	/*
	 * vfree can't be called from RCU callback
	 */
	for_each_possible_cpu(cpu)
		if (is_vmalloc_addr(s->stat_percpu) ||
		    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
			goto do_sync_free;
	if (is_vmalloc_addr(s) ||
	    is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
do_sync_free:
		synchronize_rcu_expedited();
		dm_stat_free(&s->rcu_head);
	} else {
		WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
		call_rcu(&s->rcu_head, dm_stat_free);
	}

	return 0;
}
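
/*
 * dm_stat_need_rcu_barrier records that a call_rcu() callback may still be
 * pending, so dm_statistics_exit() can issue rcu_barrier() before the module
 * text containing dm_stat_free() goes away.
 */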

static int dm_stats_list(struct dm_stats *stats, const char *program,
			 char *result, unsigned maxlen)
{
	struct dm_stat *s;
	sector_t len;
	unsigned sz = 0;

	/*
	 * Output format:
	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
	 */

	mutex_lock(&stats->mutex);
	list_for_each_entry(s, &stats->list, list_entry) {
		if (!program || !strcmp(program, s->program_id)) {
			len = s->end - s->start;
			DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
			       (unsigned long long)s->start,
			       (unsigned long long)len,
			       (unsigned long long)s->step,
			       s->program_id,
			       s->aux_data);
			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
				DMEMIT(" precise_timestamps");
			if (s->n_histogram_entries) {
				unsigned i;
				DMEMIT(" histogram:");
				for (i = 0; i < s->n_histogram_entries; i++) {
					if (i)
						DMEMIT(",");
					DMEMIT("%llu", s->histogram_boundaries[i]);
				}
			}
			DMEMIT("\n");
		}
	}
	mutex_unlock(&stats->mutex);

	return 1;
}

static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
			  struct dm_stat_percpu *p)
{
	/*
	 * This is racy, but so is part_round_stats_single.
	 */
	unsigned long long now, difference;
	unsigned in_flight_read, in_flight_write;

	if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
		now = jiffies;
	else
		now = ktime_to_ns(ktime_get());

	difference = now - shared->stamp;
	if (!difference)
		return;

	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
	if (in_flight_read)
		p->io_ticks[READ] += difference;
	if (in_flight_write)
		p->io_ticks[WRITE] += difference;
	if (in_flight_read + in_flight_write) {
		p->io_ticks_total += difference;
		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
	}
	shared->stamp = now;
}
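
/*
 * The accounting model above: each call attributes the wall-clock time since
 * the last stamp to the in-flight counters. io_ticks[rw] advances whenever at
 * least one request of that direction is outstanding, io_ticks_total whenever
 * anything is outstanding, and time_in_queue is weighted by the queue depth,
 * roughly mirroring the corresponding /proc/diskstats fields.
 */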

static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
			      int idx, sector_t len,
			      struct dm_stats_aux *stats_aux, bool end,
			      unsigned long duration_jiffies)
{
	struct dm_stat_shared *shared = &s->stat_shared[entry];
	struct dm_stat_percpu *p;

	/*
	 * For strict correctness we should use local_irq_save/restore
	 * instead of preempt_disable/enable.
	 *
	 * preempt_disable/enable is racy if the driver finishes bios
	 * from non-interrupt context as well as from interrupt context
	 * or from more different interrupts.
	 *
	 * On 64-bit architectures the race only results in not counting some
	 * events, so it is acceptable. On 32-bit architectures the race could
	 * cause the counter going off by 2^32, so we need to do proper locking
	 * there.
	 *
	 * part_stat_lock()/part_stat_unlock() have this race too.
	 */
#if BITS_PER_LONG == 32
	unsigned long flags;
	local_irq_save(flags);
#else
	preempt_disable();
#endif
	p = &s->stat_percpu[smp_processor_id()][entry];

	if (!end) {
		dm_stat_round(s, shared, p);
		atomic_inc(&shared->in_flight[idx]);
	} else {
		unsigned long long duration;
		dm_stat_round(s, shared, p);
		atomic_dec(&shared->in_flight[idx]);
		p->sectors[idx] += len;
		p->ios[idx] += 1;
		p->merges[idx] += stats_aux->merged;
		if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
			p->ticks[idx] += duration_jiffies;
			duration = jiffies_to_msecs(duration_jiffies);
		} else {
			p->ticks[idx] += stats_aux->duration_ns;
			duration = stats_aux->duration_ns;
		}
		if (s->n_histogram_entries) {
			unsigned lo = 0, hi = s->n_histogram_entries + 1;
			while (lo + 1 < hi) {
				unsigned mid = (lo + hi) / 2;
				if (s->histogram_boundaries[mid - 1] > duration) {
					hi = mid;
				} else {
					lo = mid;
				}
			}
			p->histogram[lo]++;
		}
	}

#if BITS_PER_LONG == 32
	local_irq_restore(flags);
#else
	preempt_enable();
#endif
}
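
/*
 * The bisection above selects one of n + 1 buckets delimited by n boundaries:
 * e.g. with boundaries {10, 100}, a duration of 50 lands in the middle bucket
 * (index 1) and anything >= 100 in the last one (index 2).
 */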

static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
			  sector_t bi_sector, sector_t end_sector,
			  bool end, unsigned long duration_jiffies,
			  struct dm_stats_aux *stats_aux)
{
	sector_t rel_sector, offset, todo, fragment_len;
	size_t entry;

	if (end_sector <= s->start || bi_sector >= s->end)
		return;
	if (unlikely(bi_sector < s->start)) {
		rel_sector = 0;
		todo = end_sector - s->start;
	} else {
		rel_sector = bi_sector - s->start;
		todo = end_sector - bi_sector;
	}
	if (unlikely(end_sector > s->end))
		todo -= (end_sector - s->end);

	offset = dm_sector_div64(rel_sector, s->step);
	entry = rel_sector;
	do {
		if (WARN_ON_ONCE(entry >= s->n_entries)) {
			DMCRIT("Invalid area access in region id %d", s->id);
			return;
		}
		fragment_len = todo;
		if (fragment_len > s->step - offset)
			fragment_len = s->step - offset;
		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
				  stats_aux, end, duration_jiffies);
		todo -= fragment_len;
		entry++;
		offset = 0;
	} while (unlikely(todo != 0));
}
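
/*
 * A bio that crosses area boundaries is accounted piecewise: the do/while
 * loop above issues one dm_stat_for_entry() call per step-sized fragment, so
 * each area only sees the sectors that actually fall inside it.
 */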

void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
			 sector_t bi_sector, unsigned bi_sectors, bool end,
			 unsigned long start_time,
			 struct dm_stats_aux *stats_aux)
{
	struct dm_stat *s;
	sector_t end_sector;
	struct dm_stats_last_position *last;
	bool got_precise_time;
	unsigned long duration_jiffies = 0;

	if (unlikely(!bi_sectors))
		return;

	end_sector = bi_sector + bi_sectors;

	if (!end) {
		/*
		 * A race condition can at worst result in the merged flag being
		 * misrepresented, so we don't have to disable preemption here.
		 */
		last = raw_cpu_ptr(stats->last);
		stats_aux->merged =
			(bi_sector == (READ_ONCE(last->last_sector) &&
				       ((bi_rw == WRITE) ==
					(READ_ONCE(last->last_rw) == WRITE))
				       ));
		WRITE_ONCE(last->last_sector, end_sector);
		WRITE_ONCE(last->last_rw, bi_rw);
	} else
		duration_jiffies = jiffies - start_time;

	rcu_read_lock();

	got_precise_time = false;
	list_for_each_entry_rcu(s, &stats->list, list_entry) {
		if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
			/* start (!end) duration_ns is set by DM core's alloc_io() */
			if (end)
				stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
			got_precise_time = true;
		}
		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
	}

	rcu_read_unlock();
}

static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
						   struct dm_stat *s, size_t x)
{
	int cpu;
	struct dm_stat_percpu *p;

	local_irq_disable();
	p = &s->stat_percpu[smp_processor_id()][x];
	dm_stat_round(s, shared, p);
	local_irq_enable();

	shared->tmp.sectors[READ] = 0;
	shared->tmp.sectors[WRITE] = 0;
	shared->tmp.ios[READ] = 0;
	shared->tmp.ios[WRITE] = 0;
	shared->tmp.merges[READ] = 0;
	shared->tmp.merges[WRITE] = 0;
	shared->tmp.ticks[READ] = 0;
	shared->tmp.ticks[WRITE] = 0;
	shared->tmp.io_ticks[READ] = 0;
	shared->tmp.io_ticks[WRITE] = 0;
	shared->tmp.io_ticks_total = 0;
	shared->tmp.time_in_queue = 0;

	if (s->n_histogram_entries)
		memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));

	for_each_possible_cpu(cpu) {
		p = &s->stat_percpu[cpu][x];
		shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
		shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
		shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
		shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
		shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
		shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
		shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
		shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
		shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
		shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++)
				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
		}
	}
}

static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
			    bool init_tmp_percpu_totals)
{
	size_t x;
	struct dm_stat_shared *shared;
	struct dm_stat_percpu *p;

	for (x = idx_start; x < idx_end; x++) {
		shared = &s->stat_shared[x];
		if (init_tmp_percpu_totals)
			__dm_stat_init_temporary_percpu_totals(shared, s, x);
		local_irq_disable();
		p = &s->stat_percpu[smp_processor_id()][x];
		p->sectors[READ] -= shared->tmp.sectors[READ];
		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
		p->ios[READ] -= shared->tmp.ios[READ];
		p->ios[WRITE] -= shared->tmp.ios[WRITE];
		p->merges[READ] -= shared->tmp.merges[READ];
		p->merges[WRITE] -= shared->tmp.merges[WRITE];
		p->ticks[READ] -= shared->tmp.ticks[READ];
		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
		p->io_ticks_total -= shared->tmp.io_ticks_total;
		p->time_in_queue -= shared->tmp.time_in_queue;
		local_irq_enable();
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				local_irq_disable();
				p = &s->stat_percpu[smp_processor_id()][x];
				p->histogram[i] -= shared->tmp.histogram[i];
				local_irq_enable();
			}
		}
	}
}

static int dm_stats_clear(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	__dm_stat_clear(s, 0, s->n_entries, true);

	mutex_unlock(&stats->mutex);

	return 1;
}

/*
 * This is like jiffies_to_msec, but works for 64-bit values.
 */
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
	unsigned long long result;
	unsigned mult;

	if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
		return j;

	result = 0;
	if (j)
		result = jiffies_to_msecs(j & 0x3fffff);
	if (j >= 1 << 22) {
		mult = jiffies_to_msecs(1 << 22);
		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
	}
	if (j >= 1ULL << 44)
		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

	return result;
}
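
/*
 * Worked example of the chunking above: jiffies_to_msecs() takes an unsigned
 * int, so the 64-bit value is converted in 22-bit slices. With HZ == 1000
 * (one jiffy per millisecond), j == (5 << 22) + 3 yields jiffies_to_msecs(3)
 * + jiffies_to_msecs(1 << 22) * jiffies_to_msecs(5), i.e. 3 + 4194304 * 5 ms.
 */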

static int dm_stats_print(struct dm_stats *stats, int id,
			  size_t idx_start, size_t idx_len,
			  bool clear, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_stat *s;
	size_t x;
	sector_t start, end, step;
	size_t idx_end;
	struct dm_stat_shared *shared;

	/*
	 * Output format:
	 *   <start_sector>+<length> counters
	 */

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	idx_end = idx_start + idx_len;
	if (idx_end < idx_start ||
	    idx_end > s->n_entries)
		idx_end = s->n_entries;

	if (idx_start > idx_end)
		idx_start = idx_end;

	step = s->step;
	start = s->start + (step * idx_start);

	for (x = idx_start; x < idx_end; x++, start = end) {
		shared = &s->stat_shared[x];
		end = start + step;
		if (unlikely(end > s->end))
			end = s->end;

		__dm_stat_init_temporary_percpu_totals(shared, s, x);

		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
		       (unsigned long long)start,
		       (unsigned long long)step,
		       shared->tmp.ios[READ],
		       shared->tmp.merges[READ],
		       shared->tmp.sectors[READ],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
		       shared->tmp.ios[WRITE],
		       shared->tmp.merges[WRITE],
		       shared->tmp.sectors[WRITE],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
		       dm_stat_in_flight(shared),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
		       dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
			}
		}
		DMEMIT("\n");

		if (unlikely(sz + 1 >= maxlen))
			goto buffer_overflow;
	}

	if (clear)
		__dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
	mutex_unlock(&stats->mutex);

	return 1;
}

static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
	struct dm_stat *s;
	const char *new_aux_data;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	new_aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!new_aux_data) {
		mutex_unlock(&stats->mutex);
		return -ENOMEM;
	}

	kfree(s->aux_data);
	s->aux_data = new_aux_data;

	mutex_unlock(&stats->mutex);

	return 0;
}

static int parse_histogram(const char *h, unsigned *n_histogram_entries,
			   unsigned long long **histogram_boundaries)
{
	const char *q;
	unsigned n;
	unsigned long long last;

	*n_histogram_entries = 1;
	for (q = h; *q; q++)
		if (*q == ',')
			(*n_histogram_entries)++;

	*histogram_boundaries = kmalloc_array(*n_histogram_entries,
					      sizeof(unsigned long long),
					      GFP_KERNEL);
	if (!*histogram_boundaries)
		return -ENOMEM;

	n = 0;
	last = 0;
	while (1) {
		unsigned long long hi;
		int s;
		char ch;
		s = sscanf(h, "%llu%c", &hi, &ch);
		if (!s || (s == 2 && ch != ','))
			return -EINVAL;
		if (hi <= last)
			return -EINVAL;
		last = hi;
		(*histogram_boundaries)[n] = hi;
		if (s == 1)
			return 0;
		h = strchr(h, ',') + 1;
		n++;
	}
}
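
/*
 * Example: the feature argument "histogram:1,10,100" parses to
 * n_histogram_entries == 3 with boundaries {1, 10, 100}, i.e. four buckets:
 * < 1, [1, 10), [10, 100) and >= 100 (milliseconds by default, nanoseconds
 * with precise_timestamps).
 */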

static int message_stats_create(struct mapped_device *md,
				unsigned argc, char **argv,
				char *result, unsigned maxlen)
{
	int r;
	int id;
	char dummy;
	unsigned long long start, end, len, step;
	unsigned divisor;
	const char *program_id, *aux_data;
	unsigned stat_flags = 0;

	unsigned n_histogram_entries = 0;
	unsigned long long *histogram_boundaries = NULL;

	struct dm_arg_set as, as_backup;
	const char *a;
	unsigned feature_args;

	/*
	 * Input format:
	 *   <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]]
	 */

	if (argc < 3)
		goto ret_einval;

	as.argc = argc;
	as.argv = argv;
	dm_consume_args(&as, 1);

	a = dm_shift_arg(&as);
	if (!strcmp(a, "-")) {
		start = 0;
		len = dm_get_size(md);
		if (!len)
			len = 1;
	} else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 ||
		   start != (sector_t)start || len != (sector_t)len)
		goto ret_einval;

	end = start + len;
	if (start >= end)
		goto ret_einval;

	a = dm_shift_arg(&as);
	if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) {
		if (!divisor)
			return -EINVAL;
		step = end - start;
		if (do_div(step, divisor))
			step++;
		if (!step)
			step = 1;
	} else if (sscanf(a, "%llu%c", &step, &dummy) != 1 ||
		   step != (sector_t)step || !step)
		goto ret_einval;

	as_backup = as;
	a = dm_shift_arg(&as);
	if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) {
		while (feature_args--) {
			a = dm_shift_arg(&as);
			if (!a)
				goto ret_einval;
			if (!strcasecmp(a, "precise_timestamps"))
				stat_flags |= STAT_PRECISE_TIMESTAMPS;
			else if (!strncasecmp(a, "histogram:", 10)) {
				if (n_histogram_entries)
					goto ret_einval;
				if ((r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries)))
					goto ret;
			} else
				goto ret_einval;
		}
	} else {
		as = as_backup;
	}

	program_id = "-";
	aux_data = "-";

	a = dm_shift_arg(&as);
	if (a)
		program_id = a;

	a = dm_shift_arg(&as);
	if (a)
		aux_data = a;

	if (as.argc)
		goto ret_einval;

	/*
	 * If a buffer overflow happens after we created the region,
	 * it's too late (the userspace would retry with a larger
	 * buffer, but the region id that caused the overflow is already
	 * leaked). So we must detect buffer overflow in advance.
	 */
	snprintf(result, maxlen, "%d", INT_MAX);
	if (dm_message_test_buffer_overflow(result, maxlen)) {
		r = 1;
		goto ret;
	}

	id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
			     n_histogram_entries, histogram_boundaries, program_id, aux_data,
			     dm_internal_suspend_fast, dm_internal_resume_fast, md);
	if (id < 0) {
		r = id;
		goto ret;
	}

	snprintf(result, maxlen, "%d", id);

	r = 1;
	goto ret;

ret_einval:
	r = -EINVAL;
ret:
	kfree(histogram_boundaries);
	return r;
}
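
/*
 * Illustrative usage (cf. Documentation/admin-guide/device-mapper/
 * statistics.rst): a region covering the whole device, split into 100 equal
 * areas, is created with
 *	dmsetup message <device> 0 @stats_create - /100
 * and the message replies with the new region's id.
 */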

static int message_stats_delete(struct mapped_device *md,
				unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_delete(dm_get_stats(md), id);
}

static int message_stats_clear(struct mapped_device *md,
			       unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_clear(dm_get_stats(md), id);
}

static int message_stats_list(struct mapped_device *md,
			      unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r;
	const char *program = NULL;

	if (argc < 1 || argc > 2)
		return -EINVAL;

	if (argc > 1) {
		program = kstrdup(argv[1], GFP_KERNEL);
		if (!program)
			return -ENOMEM;
	}

	r = dm_stats_list(dm_get_stats(md), program, result, maxlen);

	kfree(program);

	return r;
}

static int message_stats_print(struct mapped_device *md,
			       unsigned argc, char **argv, bool clear,
			       char *result, unsigned maxlen)
{
	int id;
	char dummy;
	unsigned long idx_start = 0, idx_len = ULONG_MAX;

	if (argc != 2 && argc != 4)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	if (argc > 3) {
		if (strcmp(argv[2], "-") &&
		    sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
			return -EINVAL;
		if (strcmp(argv[3], "-") &&
		    sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
			return -EINVAL;
	}

	return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
			      result, maxlen);
}

static int message_stats_set_aux(struct mapped_device *md,
				 unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 3)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}

int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
		     char *result, unsigned maxlen)
{
	int r;

	/* All messages here must start with '@' */
	if (!strcasecmp(argv[0], "@stats_create"))
		r = message_stats_create(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_delete"))
		r = message_stats_delete(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_clear"))
		r = message_stats_clear(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_list"))
		r = message_stats_list(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print"))
		r = message_stats_print(md, argc, argv, false, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print_clear"))
		r = message_stats_print(md, argc, argv, true, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_set_aux"))
		r = message_stats_set_aux(md, argc, argv);
	else
		return 2; /* this wasn't a stats message */

	if (r == -EINVAL)
		DMWARN("Invalid parameters for message %s", argv[0]);

	return r;
}

int __init dm_statistics_init(void)
{
	shared_memory_amount = 0;
	dm_stat_need_rcu_barrier = 0;
	return 0;
}

void dm_statistics_exit(void)
{
	if (dm_stat_need_rcu_barrier)
		rcu_barrier();
	if (WARN_ON(shared_memory_amount))
		DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}

module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");