diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 8ee7e5ec21be..3bf4a9984f4c 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,6 +72,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
+/**
+ * This function is both preempt and irq safe. The former is due to explicit
+ * preemption disabling. The latter is guaranteed by the fact that the slow
+ * path is explicitly protected by an irq-safe spinlock, whereas the fast path
+ * uses this_cpu_add(), which is irq-safe by definition. Hence there is no
+ * need to muck with irq state before calling this function.
+ */
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
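
For reference (not part of the patch): the body of percpu_counter_add_batch()
that the new comment describes looks roughly like the sketch below in kernels
of this generation. Field names (fbc->lock, fbc->count, fbc->counters) follow
include/linux/percpu_counter.h; treat this as an illustrative sketch rather
than the exact state of this tree.

	#include <linux/percpu_counter.h>

	/* Sketch of percpu_counter_add_batch() as the comment describes it. */
	void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
				      s32 batch)
	{
		s64 count;

		/* Preempt safety: pin the task to the current CPU's counter. */
		preempt_disable();
		count = __this_cpu_read(*fbc->counters) + amount;
		if (count >= batch || count <= -batch) {
			unsigned long flags;

			/*
			 * Slow path: fold the accumulated per-cpu delta into
			 * the shared count under an irq-safe spinlock, and
			 * reset the local counter to zero.
			 */
			raw_spin_lock_irqsave(&fbc->lock, flags);
			fbc->count += count;
			__this_cpu_sub(*fbc->counters, count - amount);
			raw_spin_unlock_irqrestore(&fbc->lock, flags);
		} else {
			/* Fast path: this_cpu_add() is irq-safe by definition. */
			this_cpu_add(*fbc->counters, amount);
		}
		preempt_enable();
	}

The batch threshold keeps the common case lock-free: callers only pay for the
spinlock when a CPU's local delta crosses +/-batch, which is also why reads of
the shared count are cheap but only approximate.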