#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"

static int dm_stat_need_rcu_barrier;

/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
struct dm_stat_percpu {
	unsigned long long sectors[2];
	unsigned long long ios[2];
	unsigned long long merges[2];
	unsigned long long ticks[2];
	unsigned long long io_ticks[2];
	unsigned long long io_ticks_total;
	unsigned long long time_in_queue;
};

struct dm_stat_shared {
	atomic_t in_flight[2];
	unsigned long stamp;
	struct dm_stat_percpu tmp;
};

struct dm_stat {
	struct list_head list_entry;
	int id;
	size_t n_entries;
	sector_t start;
	sector_t end;
	sector_t step;
	const char *program_id;
	const char *aux_data;
	struct rcu_head rcu_head;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	struct dm_stat_percpu *stat_percpu[NR_CPUS];
	struct dm_stat_shared stat_shared[0];
};

struct dm_stats_last_position {
	sector_t last_sector;
	unsigned last_rw;
};

/*
 * A typo on the command line could possibly make the kernel run out of memory
 * and crash. To prevent the crash we account all used memory. We fail if we
 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
 */
#define DM_STATS_MEMORY_FACTOR		4
#define DM_STATS_VMALLOC_FACTOR		2

static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;
static bool __check_shared_memory(size_t alloc_size)
{
	size_t a;

	a = shared_memory_amount + alloc_size;
	if (a < shared_memory_amount)
		return false;
	if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
		return false;
#ifdef CONFIG_MMU
	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
		return false;
#endif
	return true;
}

static bool check_shared_memory(size_t alloc_size)
{
	bool ret;

	spin_lock_irq(&shared_memory_lock);

	ret = __check_shared_memory(alloc_size);

	spin_unlock_irq(&shared_memory_lock);

	return ret;
}

static bool claim_shared_memory(size_t alloc_size)
{
	spin_lock_irq(&shared_memory_lock);

	if (!__check_shared_memory(alloc_size)) {
		spin_unlock_irq(&shared_memory_lock);
		return false;
	}

	shared_memory_amount += alloc_size;

	spin_unlock_irq(&shared_memory_lock);

	return true;
}

static void free_shared_memory(size_t alloc_size)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_memory_lock, flags);

	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
		spin_unlock_irqrestore(&shared_memory_lock, flags);
		DMCRIT("Memory usage accounting bug.");
		return;
	}

	shared_memory_amount -= alloc_size;

	spin_unlock_irqrestore(&shared_memory_lock, flags);
}
static void *dm_kvzalloc(size_t alloc_size, int node)
{
	void *p;

	if (!claim_shared_memory(alloc_size))
		return NULL;

	if (alloc_size <= KMALLOC_MAX_SIZE) {
		p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
		if (p)
			return p;
	}
	p = vzalloc_node(alloc_size, node);
	if (p)
		return p;

	free_shared_memory(alloc_size);

	return NULL;
}

static void dm_kvfree(void *ptr, size_t alloc_size)
{
	if (!ptr)
		return;

	free_shared_memory(alloc_size);

	kvfree(ptr);
}
static void dm_stat_free(struct rcu_head *head)
{
	int cpu;
	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

	kfree(s->program_id);
	kfree(s->aux_data);
	for_each_possible_cpu(cpu)
		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
	dm_kvfree(s, s->shared_alloc_size);
}

static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
	return atomic_read(&shared->in_flight[READ]) +
	       atomic_read(&shared->in_flight[WRITE]);
}

void dm_stats_init(struct dm_stats *stats)
{
	int cpu;
	struct dm_stats_last_position *last;

	mutex_init(&stats->mutex);
	INIT_LIST_HEAD(&stats->list);
	stats->last = alloc_percpu(struct dm_stats_last_position);
	for_each_possible_cpu(cpu) {
		last = per_cpu_ptr(stats->last, cpu);
		last->last_sector = (sector_t)ULLONG_MAX;
		last->last_rw = UINT_MAX;
	}
}

void dm_stats_cleanup(struct dm_stats *stats)
{
	size_t ni;
	struct dm_stat *s;
	struct dm_stat_shared *shared;

	while (!list_empty(&stats->list)) {
		s = container_of(stats->list.next, struct dm_stat, list_entry);
		list_del(&s->list_entry);
		for (ni = 0; ni < s->n_entries; ni++) {
			shared = &s->stat_shared[ni];
			if (WARN_ON(dm_stat_in_flight(shared))) {
				DMCRIT("leaked in-flight counter at index %lu "
				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
				       (unsigned long)ni,
				       (unsigned long long)s->start,
				       (unsigned long long)s->end,
				       (unsigned long long)s->step,
				       atomic_read(&shared->in_flight[READ]),
				       atomic_read(&shared->in_flight[WRITE]));
			}
		}
		dm_stat_free(&s->rcu_head);
	}
	free_percpu(stats->last);
}
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
			   sector_t step, const char *program_id, const char *aux_data,
			   void (*suspend_callback)(struct mapped_device *),
			   void (*resume_callback)(struct mapped_device *),
			   struct mapped_device *md)
{
	struct list_head *l;
	struct dm_stat *s, *tmp_s;
	sector_t n_entries;
	size_t ni;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	struct dm_stat_percpu *p;
	int cpu;
	int ret_id;
	int r;

	if (end < start || !step)
		return -EINVAL;

	n_entries = end - start;
	if (dm_sector_div64(n_entries, step))
		n_entries++;

	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
		return -EOVERFLOW;

	shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
		return -EOVERFLOW;

	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
		return -EOVERFLOW;

	if (!check_shared_memory(shared_alloc_size + num_possible_cpus() * percpu_alloc_size))
		return -ENOMEM;

	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
	if (!s)
		return -ENOMEM;

	s->n_entries = n_entries;
	s->start = start;
	s->end = end;
	s->step = step;
	s->shared_alloc_size = shared_alloc_size;
	s->percpu_alloc_size = percpu_alloc_size;

	s->program_id = kstrdup(program_id, GFP_KERNEL);
	if (!s->program_id) {
		r = -ENOMEM;
		goto out;
	}
	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!s->aux_data) {
		r = -ENOMEM;
		goto out;
	}

	for (ni = 0; ni < n_entries; ni++) {
		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
	}

	for_each_possible_cpu(cpu) {
		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
		if (!p) {
			r = -ENOMEM;
			goto out;
		}
		s->stat_percpu[cpu] = p;
	}

	/*
	 * Suspend/resume to make sure there is no i/o in flight,
	 * so that newly created statistics will be exact.
	 *
	 * (note: we couldn't suspend earlier because we must not
	 * allocate memory while suspended)
	 */
	suspend_callback(md);

	mutex_lock(&stats->mutex);
	s->id = 0;
	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (WARN_ON(tmp_s->id < s->id)) {
			r = -EINVAL;
			goto out_unlock_resume;
		}
		if (tmp_s->id > s->id)
			break;
		if (unlikely(s->id == INT_MAX)) {
			r = -ENFILE;
			goto out_unlock_resume;
		}
		s->id++;
	}
	ret_id = s->id;
	list_add_tail_rcu(&s->list_entry, l);
	mutex_unlock(&stats->mutex);

	resume_callback(md);

	return ret_id;

out_unlock_resume:
	mutex_unlock(&stats->mutex);
	resume_callback(md);
out:
	dm_stat_free(&s->rcu_head);
	return r;
}
340 | ||
341 | static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id) | |
342 | { | |
343 | struct dm_stat *s; | |
344 | ||
345 | list_for_each_entry(s, &stats->list, list_entry) { | |
346 | if (s->id > id) | |
347 | break; | |
348 | if (s->id == id) | |
349 | return s; | |
350 | } | |
351 | ||
352 | return NULL; | |
353 | } | |
354 | ||
static int dm_stats_delete(struct dm_stats *stats, int id)
{
	struct dm_stat *s;
	int cpu;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	list_del_rcu(&s->list_entry);
	mutex_unlock(&stats->mutex);

	/*
	 * vfree can't be called from RCU callback
	 */
	for_each_possible_cpu(cpu)
		if (is_vmalloc_addr(s->stat_percpu[cpu]))
			goto do_sync_free;
	if (is_vmalloc_addr(s)) {
do_sync_free:
		synchronize_rcu_expedited();
		dm_stat_free(&s->rcu_head);
	} else {
		ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
		call_rcu(&s->rcu_head, dm_stat_free);
	}
	return 0;
}
387 | ||
388 | static int dm_stats_list(struct dm_stats *stats, const char *program, | |
389 | char *result, unsigned maxlen) | |
390 | { | |
391 | struct dm_stat *s; | |
392 | sector_t len; | |
393 | unsigned sz = 0; | |
394 | ||
395 | /* | |
396 | * Output format: | |
397 | * <region_id>: <start_sector>+<length> <step> <program_id> <aux_data> | |
398 | */ | |
399 | ||
400 | mutex_lock(&stats->mutex); | |
401 | list_for_each_entry(s, &stats->list, list_entry) { | |
402 | if (!program || !strcmp(program, s->program_id)) { | |
403 | len = s->end - s->start; | |
404 | DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id, | |
405 | (unsigned long long)s->start, | |
406 | (unsigned long long)len, | |
407 | (unsigned long long)s->step, | |
408 | s->program_id, | |
409 | s->aux_data); | |
410 | } | |
411 | } | |
412 | mutex_unlock(&stats->mutex); | |
413 | ||
414 | return 1; | |
415 | } | |
416 | ||
static void dm_stat_round(struct dm_stat_shared *shared, struct dm_stat_percpu *p)
{
	/*
	 * This is racy, but so is part_round_stats_single.
	 */
	unsigned long now = jiffies;
	unsigned in_flight_read;
	unsigned in_flight_write;
	unsigned long difference = now - shared->stamp;

	if (!difference)
		return;
	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
	if (in_flight_read)
		p->io_ticks[READ] += difference;
	if (in_flight_write)
		p->io_ticks[WRITE] += difference;
	if (in_flight_read + in_flight_write) {
		p->io_ticks_total += difference;
		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
	}
	shared->stamp = now;
}

static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
			      unsigned long bi_rw, sector_t len, bool merged,
			      bool end, unsigned long duration)
{
	unsigned long idx = bi_rw & REQ_WRITE;
	struct dm_stat_shared *shared = &s->stat_shared[entry];
	struct dm_stat_percpu *p;

	/*
	 * For strict correctness we should use local_irq_save/restore
	 * instead of preempt_disable/enable.
	 *
	 * preempt_disable/enable is racy if the driver finishes bios
	 * from non-interrupt context as well as from interrupt context
	 * or from several different interrupts.
	 *
	 * On 64-bit architectures the race only results in not counting some
	 * events, so it is acceptable. On 32-bit architectures the race could
	 * cause the counter to go off by 2^32, so we need to do proper locking
	 * there.
	 *
	 * part_stat_lock()/part_stat_unlock() have this race too.
	 */
#if BITS_PER_LONG == 32
	unsigned long flags;
	local_irq_save(flags);
#else
	preempt_disable();
#endif
	p = &s->stat_percpu[smp_processor_id()][entry];

	if (!end) {
		dm_stat_round(shared, p);
		atomic_inc(&shared->in_flight[idx]);
	} else {
		dm_stat_round(shared, p);
		atomic_dec(&shared->in_flight[idx]);
		p->sectors[idx] += len;
		p->ios[idx] += 1;
		p->merges[idx] += merged;
		p->ticks[idx] += duration;
	}

#if BITS_PER_LONG == 32
	local_irq_restore(flags);
#else
	preempt_enable();
#endif
}

static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
			  sector_t bi_sector, sector_t end_sector,
			  bool end, unsigned long duration,
			  struct dm_stats_aux *stats_aux)
{
	sector_t rel_sector, offset, todo, fragment_len;
	size_t entry;

	if (end_sector <= s->start || bi_sector >= s->end)
		return;
	if (unlikely(bi_sector < s->start)) {
		rel_sector = 0;
		todo = end_sector - s->start;
	} else {
		rel_sector = bi_sector - s->start;
		todo = end_sector - bi_sector;
	}
	if (unlikely(end_sector > s->end))
		todo -= (end_sector - s->end);

	offset = dm_sector_div64(rel_sector, s->step);
	entry = rel_sector;
	do {
		if (WARN_ON_ONCE(entry >= s->n_entries)) {
			DMCRIT("Invalid area access in region id %d", s->id);
			return;
		}
		fragment_len = todo;
		if (fragment_len > s->step - offset)
			fragment_len = s->step - offset;
		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
				  stats_aux->merged, end, duration);
		todo -= fragment_len;
		entry++;
		offset = 0;
	} while (unlikely(todo != 0));
}

void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
			 sector_t bi_sector, unsigned bi_sectors, bool end,
			 unsigned long duration, struct dm_stats_aux *stats_aux)
{
	struct dm_stat *s;
	sector_t end_sector;
	struct dm_stats_last_position *last;

	if (unlikely(!bi_sectors))
		return;

	end_sector = bi_sector + bi_sectors;

	if (!end) {
		/*
		 * A race condition can at worst result in the merged flag being
		 * misrepresented, so we don't have to disable preemption here.
		 */
		last = raw_cpu_ptr(stats->last);
		stats_aux->merged =
			(bi_sector == ACCESS_ONCE(last->last_sector) &&
			 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
			  (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD))));
		ACCESS_ONCE(last->last_sector) = end_sector;
		ACCESS_ONCE(last->last_rw) = bi_rw;
	}

	rcu_read_lock();

	list_for_each_entry_rcu(s, &stats->list, list_entry)
		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration, stats_aux);

	rcu_read_unlock();
}

static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
						   struct dm_stat *s, size_t x)
{
	int cpu;
	struct dm_stat_percpu *p;

	local_irq_disable();
	p = &s->stat_percpu[smp_processor_id()][x];
	dm_stat_round(shared, p);
	local_irq_enable();

	memset(&shared->tmp, 0, sizeof(shared->tmp));
	for_each_possible_cpu(cpu) {
		p = &s->stat_percpu[cpu][x];
		shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
		shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
		shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
		shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
		shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
		shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
		shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
		shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
		shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
		shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
		shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
		shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
	}
}

static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
			    bool init_tmp_percpu_totals)
{
	size_t x;
	struct dm_stat_shared *shared;
	struct dm_stat_percpu *p;

	for (x = idx_start; x < idx_end; x++) {
		shared = &s->stat_shared[x];
		if (init_tmp_percpu_totals)
			__dm_stat_init_temporary_percpu_totals(shared, s, x);
		local_irq_disable();
		p = &s->stat_percpu[smp_processor_id()][x];
		p->sectors[READ] -= shared->tmp.sectors[READ];
		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
		p->ios[READ] -= shared->tmp.ios[READ];
		p->ios[WRITE] -= shared->tmp.ios[WRITE];
		p->merges[READ] -= shared->tmp.merges[READ];
		p->merges[WRITE] -= shared->tmp.merges[WRITE];
		p->ticks[READ] -= shared->tmp.ticks[READ];
		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
		p->io_ticks_total -= shared->tmp.io_ticks_total;
		p->time_in_queue -= shared->tmp.time_in_queue;
		local_irq_enable();
	}
}

static int dm_stats_clear(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	__dm_stat_clear(s, 0, s->n_entries, true);

	mutex_unlock(&stats->mutex);

	return 1;
}

/*
 * This is like jiffies_to_msecs, but works for 64-bit values.
 * jiffies_to_msecs only takes an unsigned int, so the value is converted
 * 22 bits at a time: the low bits directly, the middle bits scaled by
 * jiffies_to_msecs(1 << 22), and the high bits by that factor squared.
 */
static unsigned long long dm_jiffies_to_msec64(unsigned long long j)
{
	unsigned long long result = 0;
	unsigned mult;

	if (j)
		result = jiffies_to_msecs(j & 0x3fffff);
	if (j >= 1 << 22) {
		mult = jiffies_to_msecs(1 << 22);
		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
	}
	if (j >= 1ULL << 44)
		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

	return result;
}

static int dm_stats_print(struct dm_stats *stats, int id,
			  size_t idx_start, size_t idx_len,
			  bool clear, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_stat *s;
	size_t x;
	sector_t start, end, step;
	size_t idx_end;
	struct dm_stat_shared *shared;

	/*
	 * Output format:
	 *   <start_sector>+<length> counters
	 */

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	idx_end = idx_start + idx_len;
	if (idx_end < idx_start ||
	    idx_end > s->n_entries)
		idx_end = s->n_entries;

	if (idx_start > idx_end)
		idx_start = idx_end;

	step = s->step;
	start = s->start + (step * idx_start);

	for (x = idx_start; x < idx_end; x++, start = end) {
		shared = &s->stat_shared[x];
		end = start + step;
		if (unlikely(end > s->end))
			end = s->end;

		__dm_stat_init_temporary_percpu_totals(shared, s, x);

		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu\n",
		       (unsigned long long)start,
		       (unsigned long long)step,
		       shared->tmp.ios[READ],
		       shared->tmp.merges[READ],
		       shared->tmp.sectors[READ],
		       dm_jiffies_to_msec64(shared->tmp.ticks[READ]),
		       shared->tmp.ios[WRITE],
		       shared->tmp.merges[WRITE],
		       shared->tmp.sectors[WRITE],
		       dm_jiffies_to_msec64(shared->tmp.ticks[WRITE]),
		       dm_stat_in_flight(shared),
		       dm_jiffies_to_msec64(shared->tmp.io_ticks_total),
		       dm_jiffies_to_msec64(shared->tmp.time_in_queue),
		       dm_jiffies_to_msec64(shared->tmp.io_ticks[READ]),
		       dm_jiffies_to_msec64(shared->tmp.io_ticks[WRITE]));

		if (unlikely(sz + 1 >= maxlen))
			goto buffer_overflow;
	}

	if (clear)
		__dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
	mutex_unlock(&stats->mutex);

	return 1;
}

static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
	struct dm_stat *s;
	const char *new_aux_data;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	new_aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!new_aux_data) {
		mutex_unlock(&stats->mutex);
		return -ENOMEM;
	}

	kfree(s->aux_data);
	s->aux_data = new_aux_data;

	mutex_unlock(&stats->mutex);

	return 0;
}

static int message_stats_create(struct mapped_device *md,
				unsigned argc, char **argv,
				char *result, unsigned maxlen)
{
	int id;
	char dummy;
	unsigned long long start, end, len, step;
	unsigned divisor;
	const char *program_id, *aux_data;

	/*
	 * Input format:
	 *   <range> <step> [<program_id> [<aux_data>]]
	 */

	if (argc < 3 || argc > 5)
		return -EINVAL;

	if (!strcmp(argv[1], "-")) {
		start = 0;
		len = dm_get_size(md);
		if (!len)
			len = 1;
	} else if (sscanf(argv[1], "%llu+%llu%c", &start, &len, &dummy) != 2 ||
		   start != (sector_t)start || len != (sector_t)len)
		return -EINVAL;

	end = start + len;
	if (start >= end)
		return -EINVAL;

	if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
		step = end - start;
		if (do_div(step, divisor))
			step++;
		if (!step)
			step = 1;
	} else if (sscanf(argv[2], "%llu%c", &step, &dummy) != 1 ||
		   step != (sector_t)step || !step)
		return -EINVAL;

	program_id = "-";
	aux_data = "-";

	if (argc > 3)
		program_id = argv[3];

	if (argc > 4)
		aux_data = argv[4];

	/*
	 * If a buffer overflow happens after we created the region,
	 * it's too late (the userspace would retry with a larger
	 * buffer, but the region id that caused the overflow is already
	 * leaked). So we must detect buffer overflow in advance.
	 */
	snprintf(result, maxlen, "%d", INT_MAX);
	if (dm_message_test_buffer_overflow(result, maxlen))
		return 1;

	id = dm_stats_create(dm_get_stats(md), start, end, step, program_id, aux_data,
			     dm_internal_suspend_fast, dm_internal_resume_fast, md);
	if (id < 0)
		return id;

	snprintf(result, maxlen, "%d", id);

	return 1;
}
832 | ||
833 | static int message_stats_delete(struct mapped_device *md, | |
834 | unsigned argc, char **argv) | |
835 | { | |
836 | int id; | |
837 | char dummy; | |
838 | ||
839 | if (argc != 2) | |
840 | return -EINVAL; | |
841 | ||
842 | if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) | |
843 | return -EINVAL; | |
844 | ||
845 | return dm_stats_delete(dm_get_stats(md), id); | |
846 | } | |
847 | ||
848 | static int message_stats_clear(struct mapped_device *md, | |
849 | unsigned argc, char **argv) | |
850 | { | |
851 | int id; | |
852 | char dummy; | |
853 | ||
854 | if (argc != 2) | |
855 | return -EINVAL; | |
856 | ||
857 | if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) | |
858 | return -EINVAL; | |
859 | ||
860 | return dm_stats_clear(dm_get_stats(md), id); | |
861 | } | |
862 | ||
863 | static int message_stats_list(struct mapped_device *md, | |
864 | unsigned argc, char **argv, | |
865 | char *result, unsigned maxlen) | |
866 | { | |
867 | int r; | |
868 | const char *program = NULL; | |
869 | ||
870 | if (argc < 1 || argc > 2) | |
871 | return -EINVAL; | |
872 | ||
873 | if (argc > 1) { | |
874 | program = kstrdup(argv[1], GFP_KERNEL); | |
875 | if (!program) | |
876 | return -ENOMEM; | |
877 | } | |
878 | ||
879 | r = dm_stats_list(dm_get_stats(md), program, result, maxlen); | |
880 | ||
881 | kfree(program); | |
882 | ||
883 | return r; | |
884 | } | |
885 | ||
886 | static int message_stats_print(struct mapped_device *md, | |
887 | unsigned argc, char **argv, bool clear, | |
888 | char *result, unsigned maxlen) | |
889 | { | |
890 | int id; | |
891 | char dummy; | |
892 | unsigned long idx_start = 0, idx_len = ULONG_MAX; | |
893 | ||
894 | if (argc != 2 && argc != 4) | |
895 | return -EINVAL; | |
896 | ||
897 | if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) | |
898 | return -EINVAL; | |
899 | ||
900 | if (argc > 3) { | |
901 | if (strcmp(argv[2], "-") && | |
902 | sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1) | |
903 | return -EINVAL; | |
904 | if (strcmp(argv[3], "-") && | |
905 | sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1) | |
906 | return -EINVAL; | |
907 | } | |
908 | ||
909 | return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear, | |
910 | result, maxlen); | |
911 | } | |
912 | ||
913 | static int message_stats_set_aux(struct mapped_device *md, | |
914 | unsigned argc, char **argv) | |
915 | { | |
916 | int id; | |
917 | char dummy; | |
918 | ||
919 | if (argc != 3) | |
920 | return -EINVAL; | |
921 | ||
922 | if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) | |
923 | return -EINVAL; | |
924 | ||
925 | return dm_stats_set_aux(dm_get_stats(md), id, argv[2]); | |
926 | } | |
927 | ||
int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
		     char *result, unsigned maxlen)
{
	int r;

	if (dm_request_based(md)) {
		DMWARN("Statistics are only supported for bio-based devices");
		return -EOPNOTSUPP;
	}

	/* All messages here must start with '@' */
	if (!strcasecmp(argv[0], "@stats_create"))
		r = message_stats_create(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_delete"))
		r = message_stats_delete(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_clear"))
		r = message_stats_clear(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_list"))
		r = message_stats_list(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print"))
		r = message_stats_print(md, argc, argv, false, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print_clear"))
		r = message_stats_print(md, argc, argv, true, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_set_aux"))
		r = message_stats_set_aux(md, argc, argv);
	else
		return 2; /* this wasn't a stats message */

	if (r == -EINVAL)
		DMWARN("Invalid parameters for message %s", argv[0]);

	return r;
}
961 | ||
962 | int __init dm_statistics_init(void) | |
963 | { | |
76f5bee5 | 964 | shared_memory_amount = 0; |
fd2ed4d2 MP |
965 | dm_stat_need_rcu_barrier = 0; |
966 | return 0; | |
967 | } | |
968 | ||
969 | void dm_statistics_exit(void) | |
970 | { | |
971 | if (dm_stat_need_rcu_barrier) | |
972 | rcu_barrier(); | |
973 | if (WARN_ON(shared_memory_amount)) | |
974 | DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount); | |
975 | } | |
976 | ||
977 | module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO); | |
978 | MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics"); |