Merge branch 'perfcounters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author    Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 11 Jun 2009 21:01:07 +0000 (14:01 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 11 Jun 2009 21:01:07 +0000 (14:01 -0700)
* 'perfcounters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (574 commits)
  perf_counter: Turn off by default
  perf_counter: Add counter->id to the throttle event
  perf_counter: Better align code
  perf_counter: Rename L2 to LL cache
  perf_counter: Standardize event names
  perf_counter: Rename enums
  perf_counter tools: Clean up u64 usage
  perf_counter: Rename perf_counter_limit sysctl
  perf_counter: More paranoia settings
  perf_counter: powerpc: Implement generalized cache events for POWER processors
  perf_counters: powerpc: Add support for POWER7 processors
  perf_counter: Accurate period data
  perf_counter: Introduce struct for sample data
  perf_counter tools: Normalize data using per sample period data
  perf_counter: Annotate exit ctx recursion
  perf_counter tools: Propagate signals properly
  perf_counter tools: Small frequency related fixes
  perf_counter: More aggressive frequency adjustment
  perf_counter/x86: Fix the model number of Intel Core2 processors
  perf_counter, x86: Correct some event and umask values for Intel processors
  ...
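For orientation, the user-visible surface of the series is one new syscall. Below is a minimal sketch of opening and reading a hardware counter, assuming the struct perf_counter_attr layout and PERF_* names as they stand after the renames listed above; there is no glibc wrapper at this point, and __NR_perf_counter_open is architecture-specific and comes from the kernel's own headers, so treat the raw-syscall invocation as illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_counter.h>   /* struct perf_counter_attr, PERF_* enums */

    int main(void)
    {
        struct perf_counter_attr attr;
        unsigned long long count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size   = sizeof(attr);                /* ABI-versioning field */
        attr.type   = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;  /* per "Standardize event names" */

        /* args: attr, pid (0 = calling task), cpu (-1 = any), group_fd, flags */
        fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_counter_open");
            return 1;
        }

        /* ... workload to be measured runs here ... */

        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("instructions: %llu\n", count);
        close(fd);
        return 0;
    }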

MAINTAINERS
fs/exec.c
include/linux/init_task.h
include/linux/sched.h
kernel/exit.c
kernel/sched.c
kernel/sysctl.c
mm/mmap.c

diff --combined MAINTAINERS
index ccdb57524e3ca776d44a23f87c9c2f212b606f5f,fd24af2ff32613ab87ffed9bd74e379c602b6191..70f961d43d9ca4fee44afeef25806c40dc44a938
@@@ -71,7 -71,7 +71,7 @@@ P: Perso
  M: Mail patches to
  L: Mailing list that is relevant to this area
  W: Web-page with status/info
 -T: SCM tree type and location.  Type is one of: git, hg, quilt.
 +T: SCM tree type and location.  Type is one of: git, hg, quilt, stgit.
  S: Status, one of the following:
  
        Supported:      Someone is actually paid to look after this.
@@@ -159,8 -159,7 +159,8 @@@ F: drivers/net/r8169.
  8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
  L:    linux-serial@vger.kernel.org
  W:    http://serial.sourceforge.net
 -S:    Orphan
 +M:    alan@lxorguk.ukuu.org.uk
 +S:    Odd Fixes
  F:    drivers/serial/8250*
  F:    include/linux/serial_8250.h
  
@@@ -1979,16 -1978,6 +1979,16 @@@ F:    Documentation/edac.tx
  F:    drivers/edac/edac_*
  F:    include/linux/edac.h
  
 +EDAC-AMD64
 +P:    Doug Thompson
 +M:    dougthompson@xmission.com
 +P:    Borislav Petkov
 +M:    borislav.petkov@amd.com
 +L:    bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
 +W:    bluesmoke.sourceforge.net
 +S:    Supported
 +F:    drivers/edac/amd64_edac*
 +
  EDAC-E752X
  P:    Mark Gross
  M:    mark.gross@intel.com
@@@ -4403,6 -4392,16 +4403,16 @@@ S:    Maintaine
  F:    include/linux/delayacct.h
  F:    kernel/delayacct.c
  
+ PERFORMANCE COUNTER SUBSYSTEM
+ P:    Peter Zijlstra
+ M:    a.p.zijlstra@chello.nl
+ P:    Paul Mackerras
+ M:    paulus@samba.org
+ P:    Ingo Molnar
+ M:    mingo@elte.hu
+ L:    linux-kernel@vger.kernel.org
+ S:    Supported
+
  PERSONALITY HANDLING
  P:    Christoph Hellwig
  M:    hch@infradead.org
@@@ -5640,7 -5639,6 +5650,7 @@@ P:      Alan Co
  M:    alan@lxorguk.ukuu.org.uk
  L:    linux-kernel@vger.kernel.org
  S:    Maintained
 +T:    stgit http://zeniv.linux.org.uk/~alan/ttydev/
  
  TULIP NETWORK DRIVERS
  P:    Grant Grundler
diff --combined fs/exec.c
index a7fcd975c6b264d534f63f7958cc8918027010ae,ad4f28c2327ad097e900cf071fba2ac10bf20b04..e639957d7a57a310c12718e8fe3e4f0d9dfe2fe2
+++ b/fs/exec.c
@@@ -33,6 -33,7 +33,7 @@@
  #include <linux/string.h>
  #include <linux/init.h>
  #include <linux/pagemap.h>
+ #include <linux/perf_counter.h>
  #include <linux/highmem.h>
  #include <linux/spinlock.h>
  #include <linux/key.h>
@@@ -922,6 -923,7 +923,7 @@@ void set_task_comm(struct task_struct *
        task_lock(tsk);
        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
        task_unlock(tsk);
+       perf_counter_comm(tsk);
  }
  
  int flush_old_exec(struct linux_binprm * bprm)
  
        current->personality &= ~bprm->per_clear;
  
+       /*
+        * Flush performance counters when crossing a
+        * security domain:
+        */
+       if (!get_dumpable(current->mm))
+               perf_counter_exit_task(current);
        /* An exec changes our domain. We are no longer part of the thread
           group */
  
@@@ -1016,7 -1025,7 +1025,7 @@@ void install_exec_creds(struct linux_bi
        commit_creds(bprm->cred);
        bprm->cred = NULL;
  
 -      /* cred_exec_mutex must be held at least to this point to prevent
 +      /* cred_guard_mutex must be held at least to this point to prevent
         * ptrace_attach() from altering our determination of the task's
         * credentials; any time after this it may be unlocked */
  
@@@ -1026,7 -1035,7 +1035,7 @@@ EXPORT_SYMBOL(install_exec_creds)
  
  /*
   * determine how safe it is to execute the proposed program
 - * - the caller must hold current->cred_exec_mutex to protect against
 + * - the caller must hold current->cred_guard_mutex to protect against
   *   PTRACE_ATTACH
   */
  int check_unsafe_exec(struct linux_binprm *bprm)
@@@ -1268,7 -1277,7 +1277,7 @@@ int do_execve(char * filename
        if (!bprm)
                goto out_files;
  
 -      retval = mutex_lock_interruptible(&current->cred_exec_mutex);
 +      retval = mutex_lock_interruptible(&current->cred_guard_mutex);
        if (retval < 0)
                goto out_free;
        current->in_execve = 1;
        /* execve succeeded */
        current->fs->in_exec = 0;
        current->in_execve = 0;
 -      mutex_unlock(&current->cred_exec_mutex);
 +      mutex_unlock(&current->cred_guard_mutex);
        acct_update_integrals(current);
        free_bprm(bprm);
        if (displaced)
@@@ -1354,7 -1363,7 +1363,7 @@@ out_unmark
  
  out_unlock:
        current->in_execve = 0;
 -      mutex_unlock(&current->cred_exec_mutex);
 +      mutex_unlock(&current->cred_guard_mutex);
  
  out_free:
        free_bprm(bprm);
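
Both exec-path hooks above serve the tooling side of the series: perf_counter_comm() in set_task_comm() emits a comm event so profilers can keep pid-to-name mappings current, while the flush for non-dumpable images stops counters from following a task across a privilege boundary (e.g. into a setuid binary). For reference, the comm record that reaches userspace has roughly the following shape; this is a sketch after the layout documented in the series' linux/perf_counter.h, with a struct name invented for illustration:

    struct perf_comm_record_sketch {
        struct perf_event_header header;   /* header.type = PERF_EVENT_COMM */
        __u32 pid, tid;
        char  comm[];                      /* the new task_struct::comm value */
    };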
diff --combined include/linux/init_task.h
index 6646bfc7b8929aeadbc1c12dbdd6f48dc07d7722,b6b7cf23c2a0eab30d378b551fa0a95cd9b0841b..28b1f30601b555d6f12532a9223428244dad7e87
@@@ -108,6 -108,15 +108,15 @@@ extern struct group_info init_groups
  
  extern struct cred init_cred;
  
+ #ifdef CONFIG_PERF_COUNTERS
+ # define INIT_PERF_COUNTERS(tsk)                                      \
+       .perf_counter_mutex =                                           \
+                __MUTEX_INITIALIZER(tsk.perf_counter_mutex),           \
+       .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list),
+ #else
+ # define INIT_PERF_COUNTERS(tsk)
+ #endif
  /*
   *  INIT_TASK is used to set up the first task table, touch at
   * your own risk!. Base=0, limit=0x1fffff (=2MB)
        .group_leader   = &tsk,                                         \
        .real_cred      = &init_cred,                                   \
        .cred           = &init_cred,                                   \
 -      .cred_exec_mutex =                                              \
 -               __MUTEX_INITIALIZER(tsk.cred_exec_mutex),              \
 +      .cred_guard_mutex =                                             \
 +               __MUTEX_INITIALIZER(tsk.cred_guard_mutex),             \
        .comm           = "swapper",                                    \
        .thread         = INIT_THREAD,                                  \
        .fs             = &init_fs,                                     \
        },                                                              \
        .dirties = INIT_PROP_LOCAL_SINGLE(dirties),                     \
        INIT_IDS                                                        \
+       INIT_PERF_COUNTERS(tsk)                                         \
        INIT_TRACE_IRQFLAGS                                             \
        INIT_LOCKDEP                                                    \
        INIT_FTRACE_GRAPH                                               \
diff --combined include/linux/sched.h
index 42bf2766111e5585f24812fbc7183d81316b8f62,28c774ff3cc7539673835bd2d07d16cf458a2e04..4896fdfec91383902df140dd05723d64dd2bc769
@@@ -99,6 -99,7 +99,7 @@@ struct robust_list_head
  struct bio;
  struct fs_struct;
  struct bts_context;
+ struct perf_counter_context;
  
  /*
   * List of flags we want to share for kernel threads,
@@@ -139,6 -140,7 +140,7 @@@ extern unsigned long nr_running(void)
  extern unsigned long nr_uninterruptible(void);
  extern unsigned long nr_iowait(void);
  extern void calc_global_load(void);
+ extern u64 cpu_nr_migrations(int cpu);
  
  extern unsigned long get_parent_ip(unsigned long addr);
  
@@@ -674,6 -676,10 +676,10 @@@ struct user_struct 
        struct work_struct work;
  #endif
  #endif
+ #ifdef CONFIG_PERF_COUNTERS
+       atomic_long_t locked_vm;
+ #endif
  };
  
  extern int uids_sysfs_init(void);
@@@ -1073,9 -1079,10 +1079,10 @@@ struct sched_entity 
        u64                     last_wakeup;
        u64                     avg_overlap;
  
+       u64                     nr_migrations;
        u64                     start_runtime;
        u64                     avg_wakeup;
-       u64                     nr_migrations;
  
  #ifdef CONFIG_SCHEDSTATS
        u64                     wait_start;
@@@ -1261,9 -1268,7 +1268,9 @@@ struct task_struct 
                                         * credentials (COW) */
        const struct cred *cred;        /* effective (overridable) subjective task
                                         * credentials (COW) */
 -      struct mutex cred_exec_mutex;   /* execve vs ptrace cred calculation mutex */
 +      struct mutex cred_guard_mutex;  /* guard against foreign influences on
 +                                       * credential calculations
 +                                       * (notably ptrace) */
  
        char comm[TASK_COMM_LEN]; /* executable name excluding path
                                     - access with [gs]et_task_comm (which lock
        struct list_head pi_state_list;
        struct futex_pi_state *pi_state_cache;
  #endif
+ #ifdef CONFIG_PERF_COUNTERS
+       struct perf_counter_context *perf_counter_ctxp;
+       struct mutex perf_counter_mutex;
+       struct list_head perf_counter_list;
+ #endif
  #ifdef CONFIG_NUMA
        struct mempolicy *mempolicy;
        short il_next;
@@@ -1903,7 -1913,6 +1915,7 @@@ extern void sched_dead(struct task_stru
  
  extern void proc_caches_init(void);
  extern void flush_signals(struct task_struct *);
 +extern void __flush_signals(struct task_struct *);
  extern void ignore_signals(struct task_struct *);
  extern void flush_signal_handlers(struct task_struct *, int force_default);
  extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
@@@ -2410,6 -2419,13 +2422,13 @@@ static inline void inc_syscw(struct tas
  #define TASK_SIZE_OF(tsk)     TASK_SIZE
  #endif
  
+ /*
+  * Call the function if the target task is executing on a CPU right now:
+  */
+ extern void task_oncpu_function_call(struct task_struct *p,
+                                    void (*func) (void *info), void *info);
  #ifdef CONFIG_MM_OWNER
  extern void mm_update_next_owner(struct mm_struct *mm);
  extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
diff --combined kernel/exit.c
index 51d1fe3fb7ad5b60666c7f229d6bacd8942479e9,49cdf6946f34b4b64b4e61f18dcd417b16f7faa6..b6c90b5ef5094aef90b8cffa0fd3add3f2bbe80c
@@@ -48,6 -48,7 +48,7 @@@
  #include <linux/tracehook.h>
  #include <linux/fs_struct.h>
  #include <linux/init_task.h>
+ #include <linux/perf_counter.h>
  #include <trace/events/sched.h>
  
  #include <asm/uaccess.h>
@@@ -154,6 -155,9 +155,9 @@@ static void delayed_put_task_struct(str
  {
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
  
+ #ifdef CONFIG_PERF_COUNTERS
+       WARN_ON_ONCE(tsk->perf_counter_ctxp);
+ #endif
        trace_sched_process_free(tsk);
        put_task_struct(tsk);
  }
@@@ -170,6 -174,7 +174,7 @@@ repeat
        atomic_dec(&__task_cred(p)->user->processes);
  
        proc_flush_task(p);
        write_lock_irq(&tasklist_lock);
        tracehook_finish_release_task(p);
        __exit_signal(p);
@@@ -971,16 -976,19 +976,19 @@@ NORET_TYPE void do_exit(long code
                module_put(tsk->binfmt->module);
  
        proc_exit_connector(tsk);
+       /*
+        * Flush inherited counters to the parent - before the parent
+        * gets woken up by child-exit notifications.
+        */
+       perf_counter_exit_task(tsk);
        exit_notify(tsk, group_dead);
  #ifdef CONFIG_NUMA
        mpol_put(tsk->mempolicy);
        tsk->mempolicy = NULL;
  #endif
  #ifdef CONFIG_FUTEX
-       /*
-        * This must happen late, after the PID is not
-        * hashed anymore:
-        */
        if (unlikely(!list_empty(&tsk->pi_state_list)))
                exit_pi_state_list(tsk);
        if (unlikely(current->pi_state_cache))
@@@ -1472,7 -1480,6 +1480,7 @@@ static int wait_consider_task(struct ta
                 */
                if (*notask_error)
                        *notask_error = ret;
 +              return 0;
        }
  
        if (likely(!ptrace) && unlikely(p->ptrace)) {
diff --combined kernel/sched.c
index dcf2dc28931ad508363a7861a87a40c11ff7f4e5,5b3f6ec1b0b32db29ec326d2bf97021baad963fc..f04aa9664504025ad16e01f1c2c8c8da6a787545
@@@ -39,6 -39,7 +39,7 @@@
  #include <linux/completion.h>
  #include <linux/kernel_stat.h>
  #include <linux/debug_locks.h>
+ #include <linux/perf_counter.h>
  #include <linux/security.h>
  #include <linux/notifier.h>
  #include <linux/profile.h>
@@@ -68,6 -69,7 +69,6 @@@
  #include <linux/pagemap.h>
  #include <linux/hrtimer.h>
  #include <linux/tick.h>
 -#include <linux/bootmem.h>
  #include <linux/debugfs.h>
  #include <linux/ctype.h>
  #include <linux/ftrace.h>
@@@ -579,6 -581,7 +580,7 @@@ struct rq 
        struct load_weight load;
        unsigned long nr_load_updates;
        u64 nr_switches;
+       u64 nr_migrations_in;
  
        struct cfs_rq cfs;
        struct rt_rq rt;
@@@ -691,7 -694,7 +693,7 @@@ static inline int cpu_of(struct rq *rq
  #define task_rq(p)            cpu_rq(task_cpu(p))
  #define cpu_curr(cpu)         (cpu_rq(cpu)->curr)
  
- static inline void update_rq_clock(struct rq *rq)
+ inline void update_rq_clock(struct rq *rq)
  {
        rq->clock = sched_clock_cpu(cpu_of(rq));
  }
@@@ -1968,12 -1971,16 +1970,16 @@@ void set_task_cpu(struct task_struct *p
                p->se.sleep_start -= clock_offset;
        if (p->se.block_start)
                p->se.block_start -= clock_offset;
+ #endif
        if (old_cpu != new_cpu) {
-               schedstat_inc(p, se.nr_migrations);
+               p->se.nr_migrations++;
+               new_rq->nr_migrations_in++;
+ #ifdef CONFIG_SCHEDSTATS
                if (task_hot(p, old_rq->clock, NULL))
                        schedstat_inc(p, se.nr_forced2_migrations);
-       }
  #endif
+               perf_counter_task_migration(p, new_cpu);
+       }
        p->se.vruntime -= old_cfsrq->min_vruntime -
                                         new_cfsrq->min_vruntime;
  
@@@ -2368,6 -2375,27 +2374,27 @@@ static int sched_balance_self(int cpu, 
  
  #endif /* CONFIG_SMP */
  
+ /**
+  * task_oncpu_function_call - call a function on the cpu on which a task runs
+  * @p:                the task to evaluate
+  * @func:     the function to be called
+  * @info:     the function call argument
+  *
+  * Calls the function @func when the task is currently running. This might
+  * be on the current CPU, which just calls the function directly
+  */
+ void task_oncpu_function_call(struct task_struct *p,
+                             void (*func) (void *info), void *info)
+ {
+       int cpu;
+       preempt_disable();
+       cpu = task_cpu(p);
+       if (task_curr(p))
+               smp_call_function_single(cpu, func, info, 1);
+       preempt_enable();
+ }
  /***
   * try_to_wake_up - wake up a thread
   * @p: the to-be-woken-up thread
@@@ -2535,6 -2563,7 +2562,7 @@@ static void __sched_fork(struct task_st
        p->se.exec_start                = 0;
        p->se.sum_exec_runtime          = 0;
        p->se.prev_sum_exec_runtime     = 0;
+       p->se.nr_migrations             = 0;
        p->se.last_wakeup               = 0;
        p->se.avg_overlap               = 0;
        p->se.start_runtime             = 0;
@@@ -2765,6 -2794,7 +2793,7 @@@ static void finish_task_switch(struct r
         */
        prev_state = prev->state;
        finish_arch_switch(prev);
+       perf_counter_task_sched_in(current, cpu_of(rq));
        finish_lock_switch(rq, prev);
  #ifdef CONFIG_SMP
        if (post_schedule)
@@@ -2979,6 -3009,15 +3008,15 @@@ static void calc_load_account_active(st
        }
  }
  
+ /*
+  * Externally visible per-cpu scheduler statistics:
+  * cpu_nr_migrations(cpu) - number of migrations into that cpu
+  */
+ u64 cpu_nr_migrations(int cpu)
+ {
+       return cpu_rq(cpu)->nr_migrations_in;
+ }
  /*
   * Update rq->cpu_load[] statistics. This function is usually called every
   * scheduler tick (TICK_NSEC).
@@@ -5077,6 -5116,8 +5115,8 @@@ void scheduler_tick(void
        curr->sched_class->task_tick(rq, curr, 0);
        spin_unlock(&rq->lock);
  
+       perf_counter_task_tick(curr, cpu);
  #ifdef CONFIG_SMP
        rq->idle_at_tick = idle_cpu(cpu);
        trigger_load_balance(rq, cpu);
@@@ -5292,6 -5333,7 +5332,7 @@@ need_resched_nonpreemptible
  
        if (likely(prev != next)) {
                sched_info_switch(prev, next);
+               perf_counter_task_sched_out(prev, next, cpu);
  
                rq->nr_switches++;
                rq->curr = next;
@@@ -7535,8 -7577,10 +7576,10 @@@ migration_call(struct notifier_block *n
        return NOTIFY_OK;
  }
  
- /* Register at highest priority so that task migration (migrate_all_tasks)
-  * happens before everything else.
+ /*
+  * Register at high priority so that task migration (migrate_all_tasks)
+  * happens before everything else.  This has to be lower priority than
+  * the notifier in the perf_counter subsystem, though.
   */
  static struct notifier_block __cpuinitdata migration_notifier = {
        .notifier_call = migration_call,
@@@ -7781,21 -7825,24 +7824,21 @@@ static void rq_attach_root(struct rq *r
  
  static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
  {
 +      gfp_t gfp = GFP_KERNEL;
 +
        memset(rd, 0, sizeof(*rd));
  
 -      if (bootmem) {
 -              alloc_bootmem_cpumask_var(&def_root_domain.span);
 -              alloc_bootmem_cpumask_var(&def_root_domain.online);
 -              alloc_bootmem_cpumask_var(&def_root_domain.rto_mask);
 -              cpupri_init(&rd->cpupri, true);
 -              return 0;
 -      }
 +      if (bootmem)
 +              gfp = GFP_NOWAIT;
  
 -      if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
 +      if (!alloc_cpumask_var(&rd->span, gfp))
                goto out;
 -      if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
 +      if (!alloc_cpumask_var(&rd->online, gfp))
                goto free_span;
 -      if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
 +      if (!alloc_cpumask_var(&rd->rto_mask, gfp))
                goto free_online;
  
 -      if (cpupri_init(&rd->cpupri, false) != 0)
 +      if (cpupri_init(&rd->cpupri, bootmem) != 0)
                goto free_rto_mask;
        return 0;
  
@@@ -9119,7 -9166,7 +9162,7 @@@ void __init sched_init(void
         * we use alloc_bootmem().
         */
        if (alloc_size) {
 -              ptr = (unsigned long)alloc_bootmem(alloc_size);
 +              ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
  
  #ifdef CONFIG_FAIR_GROUP_SCHED
                init_task_group.se = (struct sched_entity **)ptr;
                 * 1024) and two child groups A0 and A1 (of weight 1024 each),
                 * then A0's share of the cpu resource is:
                 *
-                *      A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
+                *      A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
                 *
                 * We achieve this by letting init_task_group's tasks sit
                 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
        current->sched_class = &fair_sched_class;
  
        /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
 -      alloc_bootmem_cpumask_var(&nohz_cpu_mask);
 +      alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
  #ifdef CONFIG_SMP
  #ifdef CONFIG_NO_HZ
 -      alloc_bootmem_cpumask_var(&nohz.cpu_mask);
 -      alloc_bootmem_cpumask_var(&nohz.ilb_grp_nohz_mask);
 +      alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
 +      alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
  #endif
 -      alloc_bootmem_cpumask_var(&cpu_isolated_map);
 +      alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
  #endif /* SMP */
  
+       perf_counter_init();
        scheduler_running = 1;
  }
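
The scheduler side of the series reduces to the hooks visible above (sched-in, sched-out, tick, migration, init) plus task_oncpu_function_call(), which lets the counter code run a function on whichever CPU a task is currently executing on. A hedged sketch of the calling pattern follows; the callback and payload names are hypothetical, not from this diff:

    struct remote_arg {
        struct task_struct *task;
    };

    /*
     * Invoked via smp_call_function_single() on the target task's CPU,
     * i.e. in IPI context with interrupts disabled there. The task may
     * have been switched out between task_curr() and the IPI landing,
     * so the callback must re-check before touching per-CPU state.
     */
    static void remote_enable(void *info)
    {
        struct remote_arg *arg = info;

        if (arg->task != current)
            return;   /* raced with a context switch; caller handles it */
        /* safe point: arg->task is running on this CPU */
    }

    static void enable_on_task(struct task_struct *p)
    {
        struct remote_arg arg = { .task = p };

        task_oncpu_function_call(p, remote_enable, &arg);
    }

The preempt_disable()/preempt_enable() pair in the implementation is what keeps task_cpu(p) stable while the IPI is sent; if the task is not running anywhere, the function is simply never called, and handling that case is left to the caller.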
  
diff --combined kernel/sysctl.c
index 944ba03cae199ad6139f96f65113bca007fe4fda,a7e4eb0525b19e768cf6d729f0db3b07d5d35dd5..ce664f98e3fb67d7741182b9bce3fd29f4b8b88f
@@@ -49,6 -49,7 +49,7 @@@
  #include <linux/reboot.h>
  #include <linux/ftrace.h>
  #include <linux/slow-work.h>
+ #include <linux/perf_counter.h>
  
  #include <asm/uaccess.h>
  #include <asm/processor.h>
@@@ -114,7 -115,6 +115,7 @@@ static int ngroups_max = NGROUPS_MAX
  
  #ifdef CONFIG_MODULES
  extern char modprobe_path[];
 +extern int modules_disabled;
  #endif
  #ifdef CONFIG_CHR_DEV_SG
  extern int sg_big_buff;
@@@ -535,17 -535,6 +536,17 @@@ static struct ctl_table kern_table[] = 
                .proc_handler   = &proc_dostring,
                .strategy       = &sysctl_string,
        },
 +      {
 +              .ctl_name       = CTL_UNNUMBERED,
 +              .procname       = "modules_disabled",
 +              .data           = &modules_disabled,
 +              .maxlen         = sizeof(int),
 +              .mode           = 0644,
 +              /* only handle a transition from default "0" to "1" */
 +              .proc_handler   = &proc_dointvec_minmax,
 +              .extra1         = &one,
 +              .extra2         = &one,
 +      },
  #endif
  #if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
        {
                .child          = slow_work_sysctls,
        },
  #endif
+ #ifdef CONFIG_PERF_COUNTERS
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "perf_counter_paranoid",
+               .data           = &sysctl_perf_counter_paranoid,
+               .maxlen         = sizeof(sysctl_perf_counter_paranoid),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "perf_counter_mlock_kb",
+               .data           = &sysctl_perf_counter_mlock,
+               .maxlen         = sizeof(sysctl_perf_counter_mlock),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "perf_counter_max_sample_rate",
+               .data           = &sysctl_perf_counter_sample_rate,
+               .maxlen         = sizeof(sysctl_perf_counter_sample_rate),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
+ #endif
  /*
   * NOTE: do not add new entries to this table unless you have read
   * Documentation/sysctl/ctl_unnumbered.txt
@@@ -1245,6 -1260,7 +1272,6 @@@ static struct ctl_table vm_table[] = 
                .strategy       = &sysctl_jiffies,
        },
  #endif
 -#ifdef CONFIG_SECURITY
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "mmap_min_addr",
                .mode           = 0644,
                .proc_handler   = &proc_doulongvec_minmax,
        },
 -#endif
  #ifdef CONFIG_NUMA
        {
                .ctl_name       = CTL_UNNUMBERED,
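
The three new entries surface as /proc/sys/kernel/perf_counter_paranoid, /proc/sys/kernel/perf_counter_mlock_kb and /proc/sys/kernel/perf_counter_max_sample_rate: per the series, the paranoia level gates what unprivileged users may measure, the mlock value caps per-user locked ring-buffer memory in kilobytes, and the sample rate bounds how fast counters may sample. A trivial userspace probe of one knob (illustrative only):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/kernel/perf_counter_paranoid", "r");
        int level;

        if (!f) {
            perror("fopen");   /* likely a kernel without CONFIG_PERF_COUNTERS */
            return 1;
        }
        if (fscanf(f, "%d", &level) == 1)
            printf("perf_counter_paranoid = %d\n", level);
        fclose(f);
        return 0;
    }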
diff --combined mm/mmap.c
index 2b43fa1aa3c8318cbaa5e6b80da0c26944b90b4a,8101de490c73941ab8815733a3899c614cdb8cb7..34579b23ebd55ebed1a99a5473c6ca0693b559e2
+++ b/mm/mmap.c
@@@ -28,6 -28,7 +28,7 @@@
  #include <linux/mempolicy.h>
  #include <linux/rmap.h>
  #include <linux/mmu_notifier.h>
+ #include <linux/perf_counter.h>
  
  #include <asm/uaccess.h>
  #include <asm/cacheflush.h>
@@@ -87,9 -88,6 +88,9 @@@ int sysctl_overcommit_ratio = 50;     /* de
  int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
  struct percpu_counter vm_committed_as;
  
 +/* amount of vm to protect from userspace access */
 +unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
 +
  /*
   * Check that a process has enough memory to allocate a new virtual
   * mapping. 0 means there is enough memory for the allocation to
@@@ -1222,6 -1220,8 +1223,8 @@@ munmap_back
        if (correct_wcount)
                atomic_inc(&inode->i_writecount);
  out:
+       perf_counter_mmap(vma);
        mm->total_vm += len >> PAGE_SHIFT;
        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
@@@ -2308,6 -2308,8 +2311,8 @@@ int install_special_mapping(struct mm_s
  
        mm->total_vm += len >> PAGE_SHIFT;
  
+       perf_counter_mmap(vma);
        return 0;
  }