struct perf_counter_mmap_page {
__u32 version; /* version number of this structure */
__u32 compat_version; /* lowest version this is compat with */
+
+ /*
+ * Bits needed to read the hw counters in user-space.
+ *
+ * The index and offset should be read atomically using the seqlock:
+ *
+ *   __u32 seq, index;
+ *   __s64 offset;
+ *
+ * again:
+ *   rmb();
+ *   seq = pc->lock;
+ *
+ *   if (unlikely(seq & 1)) {
+ *           cpu_relax();
+ *           goto again;
+ *   }
+ *
+ *   index = pc->index;
+ *   offset = pc->offset;
+ *
+ *   rmb();
+ *   if (pc->lock != seq)
+ *           goto again;
+ *
+ * After this, index contains the architecture specific counter index + 1,
+ * so that 0 means unavailable, and offset contains the value to be added
+ * to the result of the raw counter read to obtain this counter's value.
+ */
__u32 lock; /* seqlock for synchronization */
__u32 index; /* hardware counter identifier */
__s64 offset; /* add to hardware counter value */
+ /*
+ * Control data for the mmap() data buffer.
+ *
+ * User-space should issue an rmb(), on SMP capable platforms, after
+ * reading this value -- see perf_counter_wakeup().
+ */
__u32 data_head; /* head in the data section */
};
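
A user-space read under this protocol could look like the sketch below. It is
only an illustration, not part of the patch: read_raw_counter() is a
hypothetical stand-in for the architecture specific read of hardware counter
"index - 1" (e.g. rdpmc on x86), and rmb() is a portable stand-in for the
real read barrier of the target architecture.

	#include <stdint.h>

	/* stand-in read barrier; a real reader would use the arch's rmb */
	static inline void rmb(void) { __sync_synchronize(); }

	/* hypothetical arch-specific raw read of hardware counter "idx" */
	extern uint64_t read_raw_counter(uint32_t idx);

	/* pc points at the mmap()ed struct perf_counter_mmap_page */
	static int64_t read_counter(volatile struct perf_counter_mmap_page *pc)
	{
		uint32_t seq, index;
		int64_t offset;

	again:
		rmb();
		seq = pc->lock;
		if (seq & 1)			/* update in progress */
			goto again;

		index  = pc->index;
		offset = pc->offset;

		rmb();
		if (pc->lock != seq)		/* raced with an update */
			goto again;

		if (!index)			/* 0: not readable from user-space */
			return -1;		/* fall back to read() on the counter fd */

		return (int64_t)read_raw_counter(index - 1) + offset;
	}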
return err;
}
-static void __perf_counter_update_userpage(struct perf_counter *counter,
- struct perf_mmap_data *data)
+/*
+ * Callers need to ensure there can be no nesting of this function, otherwise
+ * the seqlock logic goes bad. We cannot serialize this because the arch
+ * code calls this from NMI context.
+ */
+void perf_counter_update_userpage(struct perf_counter *counter)
{
- struct perf_counter_mmap_page *userpg = data->user_page;
+ struct perf_mmap_data *data;
+ struct perf_counter_mmap_page *userpg;
+
+ rcu_read_lock();
+ data = rcu_dereference(counter->data);
+ if (!data)
+ goto unlock;
+
+ userpg = data->user_page;
/*
* Disable preemption so as to not let the corresponding user-space
if (counter->state == PERF_COUNTER_STATE_ACTIVE)
userpg->offset -= atomic64_read(&counter->hw.prev_count);
- userpg->data_head = atomic_read(&data->head);
smp_wmb();
++userpg->lock;
preempt_enable();
-}
-
-void perf_counter_update_userpage(struct perf_counter *counter)
-{
- struct perf_mmap_data *data;
-
- rcu_read_lock();
- data = rcu_dereference(counter->data);
- if (data)
- __perf_counter_update_userpage(counter, data);
+unlock:
rcu_read_unlock();
}
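
The constraint against nesting follows from the even/odd seqlock protocol the
update uses. Schematically (a sketch of the ordering only; the real body
above also adjusts for prev_count and disables preemption):

	static void seqlock_style_update(struct perf_counter_mmap_page *userpg,
					 __u32 index, __s64 offset)
	{
		++userpg->lock;		/* odd: update in progress */
		smp_wmb();
		userpg->index  = index;
		userpg->offset = offset;
		smp_wmb();
		++userpg->lock;		/* even again: snapshot is consistent */
	}

An NMI arriving between the two increments re-enters with lock already odd;
its first increment makes lock even again while the fields are still being
rewritten, so a reader sampling in that window can accept a torn snapshot.
Hence callers must ensure this function never nests.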
data = rcu_dereference(counter->data);
if (data) {
(void)atomic_xchg(&data->wakeup, POLL_IN);
- __perf_counter_update_userpage(counter, data);
+ /*
+ * Ensure all data writes are issued before updating the
+ * user-space data head information. The matching rmb() is issued by
+ * user-space after it reads this value.
+ */
+ smp_wmb();
+ data->user_page->data_head = atomic_read(&data->head);
}
rcu_read_unlock();
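
On the user-space side the pairing barrier looks like this (again only a
sketch; rmb() is the same stand-in as in the reader example above):

	/* returns how many bytes of the data area are now safe to read */
	static uint32_t read_data_head(volatile struct perf_counter_mmap_page *pc)
	{
		uint32_t head = pc->data_head;

		rmb();	/* pairs with the smp_wmb() before the data_head store */

		return head;
	}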