perf counters: add prctl interface to disable/enable counters
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 1f0017673e77c3c9a70814d91a1ba6febf2e1c91..97d86c293ee8ffc4ee3bdee98d2e15d496f66d5d 100644
 struct task_struct;
 
 /*
- * Generalized hardware event types, used by the hw_event_type parameter
- * of the sys_perf_counter_open() syscall:
+ * User-space ABI bits:
+ */
+
+/*
+ * Generalized performance counter event types, used by the hw_event.type
+ * parameter of the sys_perf_counter_open() syscall:
  */
 enum hw_event_types {
-       PERF_COUNT_CYCLES,
-       PERF_COUNT_INSTRUCTIONS,
-       PERF_COUNT_CACHE_REFERENCES,
-       PERF_COUNT_CACHE_MISSES,
-       PERF_COUNT_BRANCH_INSTRUCTIONS,
-       PERF_COUNT_BRANCH_MISSES,
        /*
-        * If this bit is set in the type, then trigger NMI sampling:
+        * Common hardware events, generalized by the kernel:
+        */
+       PERF_COUNT_CYCLES               =  0,
+       PERF_COUNT_INSTRUCTIONS         =  1,
+       PERF_COUNT_CACHE_REFERENCES     =  2,
+       PERF_COUNT_CACHE_MISSES         =  3,
+       PERF_COUNT_BRANCH_INSTRUCTIONS  =  4,
+       PERF_COUNT_BRANCH_MISSES        =  5,
+
+       /*
+        * Special "software" counters provided by the kernel, even if
+        * the hardware does not support performance counters. These
+        * counters measure various physical and sw events of the
+        * kernel (and allow the profiling of them as well):
+        */
+       PERF_COUNT_CPU_CLOCK            = -1,
+       PERF_COUNT_TASK_CLOCK           = -2,
+       /*
+        * Future software events:
         */
-       PERF_COUNT_NMI                  = (1 << 30),
-       PERF_COUNT_RAW                  = (1 << 31),
+       /* PERF_COUNT_PAGE_FAULTS       = -3,
+          PERF_COUNT_CONTEXT_SWITCHES  = -4, */
 };
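
The sign of the type value is what now separates the two classes, since the old
PERF_COUNT_NMI/PERF_COUNT_RAW flag bits move into the hw_event structure further
down. A small illustrative check, not part of this patch:

        static inline int perf_event_type_is_software(s64 type)
        {
                /* software counters use negative values, e.g. PERF_COUNT_TASK_CLOCK */
                return type < 0;
        }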
 
 /*
  * IRQ-notification data record type:
  */
-enum perf_record_type {
-       PERF_RECORD_SIMPLE,
-       PERF_RECORD_IRQ,
-       PERF_RECORD_GROUP,
+enum perf_counter_record_type {
+       PERF_RECORD_SIMPLE              =  0,
+       PERF_RECORD_IRQ                 =  1,
+       PERF_RECORD_GROUP               =  2,
 };
 
-struct perf_counter_event {
-       u32                     hw_event_type;
-       u32                     hw_event_period;
-       u64                     hw_raw_ctrl;
+/*
+ * Hardware event to monitor via a performance monitoring counter:
+ */
+struct perf_counter_hw_event {
+       s64                     type;
+
+       u64                     irq_period;
+       u32                     record_type;
+
+       u32                     disabled     :  1, /* off by default */
+                               nmi          :  1, /* NMI sampling   */
+                               raw          :  1, /* raw event type */
+                               __reserved_1 : 29;
+
+       u64                     __reserved_2;
 };
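
From user space, the intent is that this structure is filled in and handed to
sys_perf_counter_open(). A hedged sketch of a caller, assuming the syscall takes
(hw_event pointer, pid, cpu, group_fd) as elsewhere in this series, with
__NR_perf_counter_open as a placeholder syscall number:

        #include <string.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/perf_counter.h>

        /* count instructions on the current task, sampling every 100000 events */
        static int open_instruction_counter(void)
        {
                struct perf_counter_hw_event hw_event;

                memset(&hw_event, 0, sizeof(hw_event));
                hw_event.type        = PERF_COUNT_INSTRUCTIONS;
                hw_event.irq_period  = 100000;
                hw_event.record_type = PERF_RECORD_IRQ;
                hw_event.nmi         = 1;   /* sample from NMI context */
                hw_event.disabled    = 1;   /* start off; enable later */

                /* pid 0 = current task, cpu -1 = any CPU, no group leader */
                return syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1);
        }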
 
+/*
+ * Kernel-internal data types:
+ */
+
 /**
- * struct hw_perf_counter - performance counter hardware details
+ * struct hw_perf_counter - performance counter hardware details:
  */
 struct hw_perf_counter {
-       u64                     config;
-       unsigned long           config_base;
-       unsigned long           counter_base;
-       int                     nmi;
-       unsigned int            idx;
-       u64                     prev_count;
-       s32                     next_count;
-       u64                     irq_period;
+       u64                             config;
+       unsigned long                   config_base;
+       unsigned long                   counter_base;
+       int                             nmi;
+       unsigned int                    idx;
+       u64                             prev_count;
+       u64                             irq_period;
+       s32                             next_count;
 };
 
 /*
  * Hardcoded buffer length limit for now, for IRQ-fed events:
  */
-#define PERF_DATA_BUFLEN       2048
+#define PERF_DATA_BUFLEN               2048
 
 /**
  * struct perf_data - performance counter IRQ data sampling ...
  */
 struct perf_data {
-       int                     len;
-       int                     rd_idx;
-       int                     overrun;
-       u8                      data[PERF_DATA_BUFLEN];
+       int                             len;
+       int                             rd_idx;
+       int                             overrun;
+       u8                              data[PERF_DATA_BUFLEN];
+};
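
A hedged sketch of how a read() path might drain this buffer, assuming len is the
number of bytes produced by the IRQ handler, rd_idx the consumer offset and
overrun a count of samples dropped once the buffer filled up (none of which is
spelled out in this hunk):

        static int perf_data_drain(struct perf_data *data, u8 *buf, int count)
        {
                int avail = data->len - data->rd_idx;
                int n = count < avail ? count : avail;

                memcpy(buf, data->data + data->rd_idx, n);
                data->rd_idx += n;

                return n;
        }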
+
+struct perf_counter;
+
+/**
+ * struct hw_perf_counter_ops - performance counter hw ops
+ */
+struct hw_perf_counter_ops {
+       void (*hw_perf_counter_enable)  (struct perf_counter *counter);
+       void (*hw_perf_counter_disable) (struct perf_counter *counter);
+       void (*hw_perf_counter_read)    (struct perf_counter *counter);
 };
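
hw_perf_counter_init(), declared further down, is expected to hand back one of
these op tables. A minimal illustrative backend, with placeholder function names
that are not part of this patch:

        static void sw_counter_enable(struct perf_counter *counter)
        {
                /* start counting for this counter */
        }

        static void sw_counter_disable(struct perf_counter *counter)
        {
                /* stop counting for this counter */
        }

        static void sw_counter_read(struct perf_counter *counter)
        {
                /* fold the current hardware/software value into counter->count */
        }

        static const struct hw_perf_counter_ops sw_counter_ops = {
                .hw_perf_counter_enable  = sw_counter_enable,
                .hw_perf_counter_disable = sw_counter_disable,
                .hw_perf_counter_read    = sw_counter_read,
        };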
 
 /**
  * struct perf_counter - performance counter kernel representation:
  */
 struct perf_counter {
-       struct list_head                list;
+       struct list_head                list_entry;
+       struct list_head                sibling_list;
+       struct perf_counter             *group_leader;
+       const struct hw_perf_counter_ops *hw_ops;
+
        int                             active;
 #if BITS_PER_LONG == 64
        atomic64_t                      count;
 #else
        atomic_t                        count32[2];
 #endif
-       struct perf_counter_event       event;
+       struct perf_counter_hw_event    hw_event;
        struct hw_perf_counter          hw;
 
        struct perf_counter_context     *ctx;
@@ -110,8 +156,6 @@ struct perf_counter {
        int                             oncpu;
        int                             cpu;
 
-       enum perf_record_type           record_type;
-
        /* read() / irq related data */
        wait_queue_head_t               waitq;
        /* optional: for NMIs */
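
The new list_entry/sibling_list/group_leader fields give each counter a place
both in its context and in a counter group. A hedged sketch of how the kernel
side might link them, assuming group leaders sit on ctx->counter_list and group
members hang off their leader's sibling_list (the helper name is illustrative):

        static void list_add_counter(struct perf_counter *counter,
                                     struct perf_counter_context *ctx)
        {
                struct perf_counter *leader = counter->group_leader;

                /* caller is assumed to hold ctx->lock */
                if (leader == counter)
                        list_add_tail(&counter->list_entry, &ctx->counter_list);
                else
                        list_add_tail(&counter->list_entry, &leader->sibling_list);
        }
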
@@ -132,7 +176,8 @@ struct perf_counter_context {
         * Protect the list of counters:
         */
        spinlock_t              lock;
-       struct list_head        counters;
+
+       struct list_head        counter_list;
        int                     nr_counters;
        int                     nr_active;
        struct task_struct      *task;
@@ -155,14 +200,22 @@ struct perf_cpu_context {
 extern int perf_max_counters;
 
 #ifdef CONFIG_PERF_COUNTERS
+extern const struct hw_perf_counter_ops *
+hw_perf_counter_init(struct perf_counter *counter);
+
 extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
 extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
 extern void perf_counter_task_tick(struct task_struct *task, int cpu);
 extern void perf_counter_init_task(struct task_struct *task);
 extern void perf_counter_notify(struct pt_regs *regs);
 extern void perf_counter_print_debug(void);
-extern void hw_perf_restore_ctrl(u64 ctrl);
-extern u64 hw_perf_disable_all(void);
+extern u64 hw_perf_save_disable(void);
+extern void hw_perf_restore(u64 ctrl);
+extern void atomic64_counter_set(struct perf_counter *counter, u64 val64);
+extern u64 atomic64_counter_read(struct perf_counter *counter);
+extern int perf_counter_task_disable(void);
+extern int perf_counter_task_enable(void);
+
 #else
 static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu)          { }
@@ -173,8 +226,10 @@ perf_counter_task_tick(struct task_struct *task, int cpu)          { }
 static inline void perf_counter_init_task(struct task_struct *task)    { }
 static inline void perf_counter_notify(struct pt_regs *regs)           { }
 static inline void perf_counter_print_debug(void)                      { }
-static inline void hw_perf_restore_ctrl(u64 ctrl)                      { }
-static inline u64 hw_perf_disable_all(void)            { return 0; }
+static inline void hw_perf_restore(u64 ctrl)                   { }
+static inline u64 hw_perf_save_disable(void)                 { return 0; }
+static inline int perf_counter_task_disable(void)      { return -EINVAL; }
+static inline int perf_counter_task_enable(void)       { return -EINVAL; }
 #endif
 
 #endif /* _LINUX_PERF_COUNTER_H */
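
The renamed hw_perf_save_disable()/hw_perf_restore() pair is meant to bracket
sections that must run with every counter quiesced, while
perf_counter_task_disable()/perf_counter_task_enable() back the prctl interface
named in the commit subject. A hedged usage sketch; the
PR_TASK_PERF_COUNTERS_DISABLE/ENABLE command names (from <sys/prctl.h>) are
assumed from the rest of the patch and are not defined in this header:

        /* kernel side: quiesce every counter around a PMU update */
        u64 perf_flags = hw_perf_save_disable();
        /* ... reprogram or inspect the PMU ... */
        hw_perf_restore(perf_flags);

        /* user side: toggle the calling task's counters via prctl */
        prctl(PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0);
        /* ... phase that should not be profiled ... */
        prctl(PR_TASK_PERF_COUNTERS_ENABLE, 0, 0, 0, 0);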