Merge branch 'core' of git://amd64.org/linux/rric into perf/core
author    Ingo Molnar <mingo@elte.hu>
          Tue, 15 Nov 2011 10:05:18 +0000 (11:05 +0100)
committer Ingo Molnar <mingo@elte.hu>
          Tue, 15 Nov 2011 10:05:18 +0000 (11:05 +0100)
Documentation/kernel-parameters.txt
arch/x86/oprofile/nmi_int.c
kernel/events/core.c

diff --combined Documentation/kernel-parameters.txt
index a0c5c5f4fce6e9587346a4a049c9725e5ca45de5,f7735a125f4b21cd1bb1758ba575dd4df531323e..fd5c913c33c14ee26dc1ff48ed9d7b5af8ddc0b2
@@@ -49,7 -49,6 +49,7 @@@ parameter is applicable
        EDD     BIOS Enhanced Disk Drive Services (EDD) is enabled
        EFI     EFI Partitioning (GPT) is enabled
        EIDE    EIDE/ATAPI support is enabled.
 +      EVM     Extended Verification Module
        FB      The frame buffer device is enabled.
        FTRACE  Function tracing enabled.
        GCOV    GCOV profiling is enabled.
@@@ -164,7 -163,7 +164,7 @@@ bytes respectively. Such letter suffixe
                        rsdt -- prefer RSDT over (default) XSDT
                        copy_dsdt -- copy DSDT to memory
  
 -                      See also Documentation/power/pm.txt, pci=noacpi
 +                      See also Documentation/power/runtime_pm.txt, pci=noacpi
  
        acpi_rsdp=      [ACPI,EFI,KEXEC]
                        Pass the RSDP address to the kernel, mostly used
                        behaviour to be specified.  Bit 0 enables warnings,
                        bit 1 enables fixups, and bit 2 sends a segfault.
  
 +      align_va_addr=  [X86-64]
 +                      Align virtual addresses by clearing slice [14:12] when
 +                      allocating a VMA at process creation time. This option
 +                      gives you up to 3% performance improvement on AMD F15h
 +                      machines (where it is enabled by default) for a
 +                      CPU-intensive style benchmark, and it can vary highly in
 +                      a microbenchmark depending on workload and compiler.
 +
 +                      32: only for 32-bit processes
 +                      64: only for 64-bit processes
 +                      on: enable for both 32- and 64-bit processes
 +                      off: disable for both 32- and 64-bit processes
 +
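A minimal user-space sketch of the address masking this entry describes (illustrative only; the mask follows from "slice [14:12]" above, and the helper name is hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	/* Force VA bits [14:12] to zero: for a page-aligned address this
	 * yields 32 KiB alignment, so competing mappings land on the same
	 * cache slice. */
	static uint64_t clear_va_slice(uint64_t va)
	{
		return va & ~0x7000ULL;		/* bits 14:12 */
	}

	int main(void)
	{
		uint64_t va = 0x7f0123456000ULL;	/* page-aligned */

		printf("%#llx -> %#llx\n",
		       (unsigned long long)va,
		       (unsigned long long)clear_va_slice(va));
		return 0;
	}
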
       amd_iommu=      [HW,X86-64]
                        Pass parameters to the AMD IOMMU driver in the system.
                        Possible values are:
        amijoy.map=     [HW,JOY] Amiga joystick support
                        Map of devices attached to JOY0DAT and JOY1DAT
                        Format: <a>,<b>
 -                      See also Documentation/kernel/input/joystick.txt
 +                      See also Documentation/input/joystick.txt
  
        analog.map=     [HW,JOY] Analog joystick and gamepad support
                        Specifies type or capabilities of an analog joystick
        bttv.radio=     Most important insmod options are available as
                        kernel args too.
        bttv.pll=       See Documentation/video4linux/bttv/Insmod-options
 -      bttv.tuner=     and Documentation/video4linux/bttv/CARDLIST
 +      bttv.tuner=
  
        bulk_remove=off [PPC]  This parameter disables the use of the pSeries
                        firmware feature for flushing multiple hpte entries
  
        elevator=       [IOSCHED]
                        Format: {"cfq" | "deadline" | "noop"}
 -                      See Documentation/block/as-iosched.txt and
 +                      See Documentation/block/cfq-iosched.txt and
                        Documentation/block/deadline-iosched.txt for details.
  
 -      elfcorehdr=     [IA-64,PPC,SH,X86]
 +      elfcorehdr=[size[KMG]@]offset[KMG] [IA64,PPC,SH,X86,S390]
                        Specifies physical address of start of kernel core
 -                      image elf header. Generally kexec loader will
 -                      pass this option to capture kernel.
 +                      image elf header and optionally the size. Generally
 +                      kexec loader will pass this option to capture kernel.
                        See Documentation/kdump/kdump.txt for details.
  
        enable_mtrr_cleanup [X86]
                        This option is obsoleted by the "netdev=" option, which
                        has equivalent usage. See its documentation for details.
  
 +      evm=            [EVM]
 +                      Format: { "fix" }
 +                      Permit 'security.evm' to be updated regardless of
 +                      current integrity status.
 +
        failslab=
        fail_page_alloc=
        fail_make_request=[KNL]
                        General fault injection mechanism.
                        Format: <interval>,<probability>,<space>,<times>
 -                      See also /Documentation/fault-injection/.
 +                      See also Documentation/fault-injection/.
  
        floppy=         [HW]
                        See Documentation/blockdev/floppy.txt.
        ignore_loglevel [KNL]
                        Ignore loglevel setting - this will print /all/
                        kernel messages to the console. Useful for debugging.
 +                      We also add it as a printk module parameter, so users
 +                      can change it dynamically, usually via
 +                      /sys/module/printk/parameters/ignore_loglevel.
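
The runtime knob mentioned above follows the standard module-parameter pattern; a minimal sketch (not a copy of kernel/printk.c; variable and permissions are illustrative):

	#include <linux/module.h>
	#include <linux/moduleparam.h>
	#include <linux/stat.h>

	static bool ignore_loglevel;

	/* S_IRUGO | S_IWUSR exposes the flag read/write under
	 * /sys/module/<module>/parameters/ignore_loglevel */
	module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
	MODULE_PARM_DESC(ignore_loglevel,
			 "print all kernel messages to the console");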
  
        ihash_entries=  [KNL]
                        Set number of hash buckets for inode cache.
                        has the capability. With this option, super page will
                        not be supported.
        intremap=       [X86-64, Intel-IOMMU]
 -                      Format: { on (default) | off | nosid }
                        on      enable Interrupt Remapping (default)
                        off     disable Interrupt Remapping
                        nosid   disable Source ID checking
 +                      no_x2apic_optout
 +                              BIOS x2APIC opt-out request will be ignored
  
        inttest=        [IA-64]
  
                        [KVM,Intel] Disable FlexPriority feature (TPR shadow).
                        Default is 1 (enabled)
  
 +      kvm-intel.nested=
 +                      [KVM,Intel] Enable VMX nesting (nVMX).
 +                      Default is 0 (disabled)
 +
        kvm-intel.unrestricted_guest=
                        [KVM,Intel] Disable unrestricted guest feature
                        (virtualized real and unpaged mode) on capable
                        debugging driver suspend/resume hooks).  This may
                        not work reliably with all consoles, but is known
                        to work with serial and VGA consoles.
 +                      To facilitate more flexible debugging, we also add
 +                      console_suspend, a printk module parameter to control
 +                      it. Users can use console_suspend (usually
 +                      /sys/module/printk/parameters/console_suspend) to
 +                      turn it on or off dynamically.
  
        noaliencache    [MM, NUMA, SLAB] Disables the allocation of alien
                        caches in the slab allocator.  Saves per-node memory,
  
        noresidual      [PPC] Don't use residual data on PReP machines.
  
 +      nordrand        [X86] Disable the direct use of the RDRAND
 +                      instruction even if it is supported by the
 +                      processor.  RDRAND is still available to user
 +                      space applications.
 +
        noresume        [SWSUSP] Disables resume and restores original swap
                        space.
  
                        arch_perfmon: [X86] Force use of architectural
                                perfmon on Intel CPUs instead of the
                                CPU specific event set.
+                       timer: [X86] Force use of architectural NMI
+                               timer mode (see also oprofile.timer
+                               for generic hr timer mode)
  
        oops=panic      Always panic on oopses. Default is to just kill the
                        process, but there is a small probability of
                        in <PAGE_SIZE> units (needed only for swap files).
                        See  Documentation/power/swsusp-and-swap-files.txt
  
 +      resumedelay=    [HIBERNATION] Delay (in seconds) to pause before attempting to
 +                      read the resume files
 +
 +      resumewait      [HIBERNATION] Wait (indefinitely) for resume device to show up.
 +                      Useful for devices that are detected asynchronously
 +                      (e.g. USB and MMC devices).
 +
        hibernate=      [HIBERNATION]
                noresume        Don't check if there's a hibernation image
                                present during boot.
                        Format: <integer>
  
        sonypi.*=       [HW] Sony Programmable I/O Control Device driver
 -                      See Documentation/sonypi.txt
 +                      See Documentation/laptops/sonypi.txt
  
        specialix=      [HW,SERIAL] Specialix multi-serial port adapter
                        See Documentation/serial/specialix.txt.
                        functions are at fixed addresses, they make nice
                        targets for exploits that can control RIP.
  
 -                      emulate     [default] Vsyscalls turn into traps and are
 -                                  emulated reasonably safely.
 +                      emulate     Vsyscalls turn into traps and are emulated
 +                                  reasonably safely.
  
 -                      native      Vsyscalls are native syscall instructions.
 +                      native      [default] Vsyscalls are native syscall
 +                                  instructions.
                                    This is a little bit faster than trapping
                                    and makes a few dynamic recompilers work
                                    better than they would in emulation mode.
diff --combined arch/x86/oprofile/nmi_int.c
index 75f9528e037283b4dabaef93f8bcf9ab3c9984fb,990c35bfa88f7c9f98a7cb5b99604d568b644030..26b8a8514ee566e8a7d235a06de1037c2641adec
@@@ -344,10 -344,10 +344,10 @@@ static void nmi_cpu_setup(void *dummy
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
        nmi_cpu_save_registers(msrs);
 -      spin_lock(&oprofilefs_lock);
 +      raw_spin_lock(&oprofilefs_lock);
        model->setup_ctrs(model, msrs);
        nmi_cpu_setup_mux(cpu, msrs);
 -      spin_unlock(&oprofilefs_lock);
 +      raw_spin_unlock(&oprofilefs_lock);
        per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
  }
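
The hunk above moves oprofilefs_lock to the raw-spinlock API, which keeps spinning even in configurations where ordinary spinlocks may become sleeping locks (e.g. PREEMPT_RT). A minimal sketch of the pattern; the function name is hypothetical and the critical section is elided:

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(oprofilefs_lock);

	static void setup_ctrs_locked(void)
	{
		raw_spin_lock(&oprofilefs_lock);
		/* program counters; must not sleep under a raw lock */
		raw_spin_unlock(&oprofilefs_lock);
	}
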
@@@ -385,6 -385,8 +385,6 @@@ static void nmi_cpu_shutdown(void *dumm
        apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
        apic_write(APIC_LVTERR, v);
        nmi_cpu_restore_registers(msrs);
 -      if (model->cpu_down)
 -              model->cpu_down();
  }
  
  static void nmi_cpu_up(void *dummy)
@@@ -595,24 -597,36 +595,36 @@@ static int __init p4_init(char **cpu_ty
        return 0;
  }
  
- static int force_arch_perfmon;
- static int force_cpu_type(const char *str, struct kernel_param *kp)
+ enum __force_cpu_type {
+       reserved = 0,           /* do not force */
+       timer,
+       arch_perfmon,
+ };
+ static int force_cpu_type;
+ static int set_cpu_type(const char *str, struct kernel_param *kp)
  {
-       if (!strcmp(str, "arch_perfmon")) {
-               force_arch_perfmon = 1;
+       if (!strcmp(str, "timer")) {
+               force_cpu_type = timer;
+               printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
+       } else if (!strcmp(str, "arch_perfmon")) {
+               force_cpu_type = arch_perfmon;
                printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
+       } else {
+               force_cpu_type = 0;
        }
  
        return 0;
  }
- module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
+ module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
  
  static int __init ppro_init(char **cpu_type)
  {
        __u8 cpu_model = boot_cpu_data.x86_model;
        struct op_x86_model_spec *spec = &op_ppro_spec; /* default */
  
-       if (force_arch_perfmon && cpu_has_arch_perfmon)
+       if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
                return 0;
  
        /*
@@@ -679,6 -693,9 +691,9 @@@ int __init op_nmi_init(struct oprofile_
        if (!cpu_has_apic)
                return -ENODEV;
  
+       if (force_cpu_type == timer)
+               return -ENODEV;
+
        switch (vendor) {
        case X86_VENDOR_AMD:
                /* Needs to be at least an Athlon (or hammer in 32bit mode) */
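
The cpu_type handling above relies on module_param_call() to run a custom parser whenever the parameter is set. The same pattern in isolation, mirroring the setter signature used in this tree (parameter name and accepted strings are illustrative):

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/moduleparam.h>
	#include <linux/string.h>

	static int mode;	/* 0 = default behaviour */

	static int set_mode(const char *str, struct kernel_param *kp)
	{
		if (!strcmp(str, "fast"))
			mode = 1;
		else
			mode = 0;	/* unknown strings fall back to default */
		return 0;
	}
	module_param_call(mode, set_mode, NULL, NULL, 0);
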
diff --combined kernel/events/core.c
index 8d9dea56c262bbe91494f5600b95fe10a01068c9,d2e28bdd523a9b206b69b6e66ae4b684747ddd9a..924338bb489ce1f43a400e75a2c5f5df94491cb4
  #include <linux/reboot.h>
  #include <linux/vmstat.h>
  #include <linux/device.h>
 +#include <linux/export.h>
  #include <linux/vmalloc.h>
  #include <linux/hardirq.h>
  #include <linux/rculist.h>
  #include <linux/uaccess.h>
 -#include <linux/suspend.h>
  #include <linux/syscalls.h>
  #include <linux/anon_inodes.h>
  #include <linux/kernel_stat.h>
@@@ -1322,6 -1322,7 +1322,7 @@@ retry
        }
        raw_spin_unlock_irq(&ctx->lock);
  }
+ EXPORT_SYMBOL_GPL(perf_event_disable);
  
  static void perf_set_shadow_time(struct perf_event *event,
                                 struct perf_event_context *ctx,
@@@ -1806,6 -1807,7 +1807,7 @@@ retry
  out:
        raw_spin_unlock_irq(&ctx->lock);
  }
+ EXPORT_SYMBOL_GPL(perf_event_enable);
  
  int perf_event_refresh(struct perf_event *event, int refresh)
  {
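
The two exports above make the enable/disable pair callable from modules. A sketch of a GPL module exercising them on a kernel-side counter (error handling trimmed; the event choice is illustrative, not part of this commit):

	#include <linux/err.h>
	#include <linux/module.h>
	#include <linux/perf_event.h>

	static struct perf_event *ev;

	static int __init toggle_init(void)
	{
		struct perf_event_attr attr = {
			.type	= PERF_TYPE_HARDWARE,
			.config	= PERF_COUNT_HW_CPU_CYCLES,
			.size	= sizeof(attr),
		};

		ev = perf_event_create_kernel_counter(&attr, 0 /* cpu */,
						      NULL, NULL, NULL);
		if (IS_ERR(ev))
			return PTR_ERR(ev);

		perf_event_disable(ev);	/* pause counting */
		perf_event_enable(ev);	/* resume counting */
		return 0;
	}

	static void __exit toggle_exit(void)
	{
		perf_event_release_kernel(ev);
	}

	module_init(toggle_init);
	module_exit(toggle_exit);
	MODULE_LICENSE("GPL");
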
@@@ -2569,6 -2571,215 +2571,6 @@@ static u64 perf_event_read(struct perf_
        return perf_event_count(event);
  }
  
 -/*
 - * Callchain support
 - */
 -
 -struct callchain_cpus_entries {
 -      struct rcu_head                 rcu_head;
 -      struct perf_callchain_entry     *cpu_entries[0];
 -};
 -
 -static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
 -static atomic_t nr_callchain_events;
 -static DEFINE_MUTEX(callchain_mutex);
 -struct callchain_cpus_entries *callchain_cpus_entries;
 -
 -
 -__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
 -                                struct pt_regs *regs)
 -{
 -}
 -
 -__weak void perf_callchain_user(struct perf_callchain_entry *entry,
 -                              struct pt_regs *regs)
 -{
 -}
 -
 -static void release_callchain_buffers_rcu(struct rcu_head *head)
 -{
 -      struct callchain_cpus_entries *entries;
 -      int cpu;
 -
 -      entries = container_of(head, struct callchain_cpus_entries, rcu_head);
 -
 -      for_each_possible_cpu(cpu)
 -              kfree(entries->cpu_entries[cpu]);
 -
 -      kfree(entries);
 -}
 -
 -static void release_callchain_buffers(void)
 -{
 -      struct callchain_cpus_entries *entries;
 -
 -      entries = callchain_cpus_entries;
 -      rcu_assign_pointer(callchain_cpus_entries, NULL);
 -      call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
 -}
 -
 -static int alloc_callchain_buffers(void)
 -{
 -      int cpu;
 -      int size;
 -      struct callchain_cpus_entries *entries;
 -
 -      /*
 -       * We can't use the percpu allocation API for data that can be
 -       * accessed from NMI. Use a temporary manual per cpu allocation
 -       * until that gets sorted out.
 -       */
 -      size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
 -
 -      entries = kzalloc(size, GFP_KERNEL);
 -      if (!entries)
 -              return -ENOMEM;
 -
 -      size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
 -
 -      for_each_possible_cpu(cpu) {
 -              entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
 -                                                       cpu_to_node(cpu));
 -              if (!entries->cpu_entries[cpu])
 -                      goto fail;
 -      }
 -
 -      rcu_assign_pointer(callchain_cpus_entries, entries);
 -
 -      return 0;
 -
 -fail:
 -      for_each_possible_cpu(cpu)
 -              kfree(entries->cpu_entries[cpu]);
 -      kfree(entries);
 -
 -      return -ENOMEM;
 -}
 -
 -static int get_callchain_buffers(void)
 -{
 -      int err = 0;
 -      int count;
 -
 -      mutex_lock(&callchain_mutex);
 -
 -      count = atomic_inc_return(&nr_callchain_events);
 -      if (WARN_ON_ONCE(count < 1)) {
 -              err = -EINVAL;
 -              goto exit;
 -      }
 -
 -      if (count > 1) {
 -              /* If the allocation failed, give up */
 -              if (!callchain_cpus_entries)
 -                      err = -ENOMEM;
 -              goto exit;
 -      }
 -
 -      err = alloc_callchain_buffers();
 -      if (err)
 -              release_callchain_buffers();
 -exit:
 -      mutex_unlock(&callchain_mutex);
 -
 -      return err;
 -}
 -
 -static void put_callchain_buffers(void)
 -{
 -      if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
 -              release_callchain_buffers();
 -              mutex_unlock(&callchain_mutex);
 -      }
 -}
 -
 -static int get_recursion_context(int *recursion)
 -{
 -      int rctx;
 -
 -      if (in_nmi())
 -              rctx = 3;
 -      else if (in_irq())
 -              rctx = 2;
 -      else if (in_softirq())
 -              rctx = 1;
 -      else
 -              rctx = 0;
 -
 -      if (recursion[rctx])
 -              return -1;
 -
 -      recursion[rctx]++;
 -      barrier();
 -
 -      return rctx;
 -}
 -
 -static inline void put_recursion_context(int *recursion, int rctx)
 -{
 -      barrier();
 -      recursion[rctx]--;
 -}
 -
 -static struct perf_callchain_entry *get_callchain_entry(int *rctx)
 -{
 -      int cpu;
 -      struct callchain_cpus_entries *entries;
 -
 -      *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
 -      if (*rctx == -1)
 -              return NULL;
 -
 -      entries = rcu_dereference(callchain_cpus_entries);
 -      if (!entries)
 -              return NULL;
 -
 -      cpu = smp_processor_id();
 -
 -      return &entries->cpu_entries[cpu][*rctx];
 -}
 -
 -static void
 -put_callchain_entry(int rctx)
 -{
 -      put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
 -}
 -
 -static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 -{
 -      int rctx;
 -      struct perf_callchain_entry *entry;
 -
 -
 -      entry = get_callchain_entry(&rctx);
 -      if (rctx == -1)
 -              return NULL;
 -
 -      if (!entry)
 -              goto exit_put;
 -
 -      entry->nr = 0;
 -
 -      if (!user_mode(regs)) {
 -              perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 -              perf_callchain_kernel(entry, regs);
 -              if (current->mm)
 -                      regs = task_pt_regs(current);
 -              else
 -                      regs = NULL;
 -      }
 -
 -      if (regs) {
 -              perf_callchain_store(entry, PERF_CONTEXT_USER);
 -              perf_callchain_user(entry, regs);
 -      }
 -
 -exit_put:
 -      put_callchain_entry(rctx);
 -
 -      return entry;
 -}
 -
  /*
   * Initialize the perf_event context in a task_struct:
   */
@@@ -3335,7 -3546,7 +3337,7 @@@ static void perf_mmap_close(struct vm_a
                struct ring_buffer *rb = event->rb;
  
                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
 -              vma->vm_mm->locked_vm -= event->mmap_locked;
 +              vma->vm_mm->pinned_vm -= event->mmap_locked;
                rcu_assign_pointer(event->rb, NULL);
                mutex_unlock(&event->mmap_mutex);
  
@@@ -3416,7 -3627,7 +3418,7 @@@ static int perf_mmap(struct file *file
  
        lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
 -      locked = vma->vm_mm->locked_vm + extra;
 +      locked = vma->vm_mm->pinned_vm + extra;
  
        if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
                !capable(CAP_IPC_LOCK)) {
        atomic_long_add(user_extra, &user->locked_vm);
        event->mmap_locked = extra;
        event->mmap_user = get_current_user();
 -      vma->vm_mm->locked_vm += event->mmap_locked;
 +      vma->vm_mm->pinned_vm += event->mmap_locked;
  
  unlock:
        if (!ret)
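
The pinned_vm hunks in this file move perf's buffer accounting off mm->locked_vm onto the separate pinned-pages counter while still checking it against RLIMIT_MEMLOCK. A simplified sketch of that limit test (the real check above also consults perf_paranoid_tracepoint_raw() and CAP_IPC_LOCK):

	#include <stdbool.h>

	/* all quantities in pages */
	static bool within_memlock_limit(unsigned long pinned_vm,
					 unsigned long extra,
					 unsigned long lock_limit,
					 bool cap_ipc_lock)
	{
		return pinned_vm + extra <= lock_limit || cap_ipc_lock;
	}
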
@@@ -4528,6 -4739,7 +4530,6 @@@ static void perf_swevent_overflow(struc
        struct hw_perf_event *hwc = &event->hw;
        int throttle = 0;
  
 -      data->period = event->hw.last_period;
        if (!overflow)
                overflow = perf_swevent_set_period(event);
  
@@@ -4561,12 -4773,6 +4563,12 @@@ static void perf_swevent_event(struct p
        if (!is_sampling_event(event))
                return;
  
 +      if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
 +              data->period = nr;
 +              return perf_swevent_overflow(event, 1, data, regs);
 +      } else
 +              data->period = event->hw.last_period;
 +
        if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
                return perf_swevent_overflow(event, 1, data, regs);
  
@@@ -6649,7 -6855,7 +6651,7 @@@ static void __cpuinit perf_event_init_c
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
  
        mutex_lock(&swhash->hlist_mutex);
 -      if (swhash->hlist_refcount > 0 && !swhash->swevent_hlist) {
 +      if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;
  
                hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@@ -6738,7 -6944,14 +6740,7 @@@ perf_cpu_notify(struct notifier_block *
  {
        unsigned int cpu = (long)hcpu;
  
 -      /*
 -       * Ignore suspend/resume action, the perf_pm_notifier will
 -       * take care of that.
 -       */
 -      if (action & CPU_TASKS_FROZEN)
 -              return NOTIFY_OK;
 -
 -      switch (action) {
 +      switch (action & ~CPU_TASKS_FROZEN) {
  
        case CPU_UP_PREPARE:
        case CPU_DOWN_FAILED:
        return NOTIFY_OK;
  }
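
Masking CPU_TASKS_FROZEN works because suspend/resume delivers the same hotplug actions with that bit OR-ed in; stripping it routes frozen and non-frozen events through one switch, which is what lets the PM notifier below be deleted. An illustrative user-space check (constant values as defined in <linux/cpu.h> of this era):

	#include <stdio.h>

	#define CPU_UP_PREPARE		0x0003
	#define CPU_TASKS_FROZEN	0x0010
	#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)

	int main(void)
	{
		unsigned long action = CPU_UP_PREPARE_FROZEN;

		/* frozen and non-frozen variants hit the same case label */
		printf("%#lx -> %#lx\n", action, action & ~CPU_TASKS_FROZEN);
		return 0;
	}
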
  
 -static void perf_pm_resume_cpu(void *unused)
 -{
 -      struct perf_cpu_context *cpuctx;
 -      struct perf_event_context *ctx;
 -      struct pmu *pmu;
 -      int idx;
 -
 -      idx = srcu_read_lock(&pmus_srcu);
 -      list_for_each_entry_rcu(pmu, &pmus, entry) {
 -              cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 -              ctx = cpuctx->task_ctx;
 -
 -              perf_ctx_lock(cpuctx, ctx);
 -              perf_pmu_disable(cpuctx->ctx.pmu);
 -
 -              cpu_ctx_sched_out(cpuctx, EVENT_ALL);
 -              if (ctx)
 -                      ctx_sched_out(ctx, cpuctx, EVENT_ALL);
 -
 -              perf_pmu_enable(cpuctx->ctx.pmu);
 -              perf_ctx_unlock(cpuctx, ctx);
 -      }
 -      srcu_read_unlock(&pmus_srcu, idx);
 -}
 -
 -static void perf_pm_suspend_cpu(void *unused)
 -{
 -      struct perf_cpu_context *cpuctx;
 -      struct perf_event_context *ctx;
 -      struct pmu *pmu;
 -      int idx;
 -
 -      idx = srcu_read_lock(&pmus_srcu);
 -      list_for_each_entry_rcu(pmu, &pmus, entry) {
 -              cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 -              ctx = cpuctx->task_ctx;
 -
 -              perf_ctx_lock(cpuctx, ctx);
 -              perf_pmu_disable(cpuctx->ctx.pmu);
 -
 -              perf_event_sched_in(cpuctx, ctx, current);
 -
 -              perf_pmu_enable(cpuctx->ctx.pmu);
 -              perf_ctx_unlock(cpuctx, ctx);
 -      }
 -      srcu_read_unlock(&pmus_srcu, idx);
 -}
 -
 -static int perf_resume(void)
 -{
 -      get_online_cpus();
 -      smp_call_function(perf_pm_resume_cpu, NULL, 1);
 -      put_online_cpus();
 -
 -      return NOTIFY_OK;
 -}
 -
 -static int perf_suspend(void)
 -{
 -      get_online_cpus();
 -      smp_call_function(perf_pm_suspend_cpu, NULL, 1);
 -      put_online_cpus();
 -
 -      return NOTIFY_OK;
 -}
 -
 -static int perf_pm(struct notifier_block *self, unsigned long action, void *ptr)
 -{
 -      switch (action) {
 -      case PM_POST_HIBERNATION:
 -      case PM_POST_SUSPEND:
 -              return perf_resume();
 -      case PM_HIBERNATION_PREPARE:
 -      case PM_SUSPEND_PREPARE:
 -              return perf_suspend();
 -      default:
 -              return NOTIFY_DONE;
 -      }
 -}
 -
 -static struct notifier_block perf_pm_notifier = {
 -      .notifier_call = perf_pm,
 -};
 -
  void __init perf_event_init(void)
  {
        int ret;
        perf_tp_register();
        perf_cpu_notifier(perf_cpu_notify);
        register_reboot_notifier(&perf_reboot_notifier);
 -      register_pm_notifier(&perf_pm_notifier);
  
        ret = init_hw_breakpoint();
        WARN(ret, "hw_breakpoint initialization failed with: %d", ret);