return -EINVAL;
}
- xen_percpu_init();
-
- register_cpu_notifier(&xen_cpu_notifier);
-
- pv_time_ops.steal_clock = xen_stolen_accounting;
- static_key_slow_inc(&paravirt_steal_enabled);
+ xen_time_setup_guest();
+
if (xen_initial_domain())
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}
}
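
The two dropped steal-clock lines do not vanish; they move behind the new
common helper. A minimal sketch of what xen_time_setup_guest() has to do,
assuming a shared xen_steal_clock() accessor in common Xen code (the in-tree
body may carry extra runstate setup):

	/* Sketch only: xen_steal_clock is an assumed common accessor. */
	void __init xen_time_setup_guest(void)
	{
		/* Same wiring the ARM code open-coded before this patch. */
		pv_time_ops.steal_clock = xen_steal_clock;
		static_key_slow_inc(&paravirt_steal_enabled);
	}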
- /* Unmasks the IPI on the CPU when it's online. */
- static int bcm2836_arm_irqchip_cpu_notify(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+ static int bcm2836_cpu_starting(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
- unsigned int int_reg = LOCAL_MAILBOX_INT_CONTROL0;
- unsigned int mailbox = 0;
-
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
- bcm2836_arm_irqchip_unmask_per_cpu_irq(int_reg, mailbox, cpu);
- else if (action == CPU_DYING)
- bcm2836_arm_irqchip_mask_per_cpu_irq(int_reg, mailbox, cpu);
-
- return NOTIFY_OK;
+ bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+ cpu);
+ return 0;
}
- static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
- .notifier_call = bcm2836_arm_irqchip_cpu_notify,
- .priority = 100,
- };
+ static int bcm2836_cpu_dying(unsigned int cpu)
+ {
+ bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+ cpu);
+ return 0;
+ }
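
With the notifier block gone, the two callbacks above are handed to the
hotplug state machine instead, which also takes over the ordering that the
old .priority field bought. A sketch of the registration call, assuming a
dedicated starting state (the enum constant and name string are illustrative):

	/* Sketch: state constant and name string are assumptions. */
	cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
			  "irqchip/bcm2836:starting",
			  bcm2836_cpu_starting, bcm2836_cpu_dying);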
#ifdef CONFIG_ARM
-int __init bcm2836_smp_boot_secondary(unsigned int cpu,
- struct task_struct *idle)
+static int __init bcm2836_smp_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
{
unsigned long secondary_startup_phys =
(unsigned long)virt_to_phys((void *)secondary_startup);
static inline void perf_restore_debug_store(void) { }
#endif
+static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
+{
+ return frag->pad < sizeof(u64);
+}
+
#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
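
perf_raw_frag_last() leans on the union between ->next and ->pad in struct
perf_raw_frag: a chained fragment stores a pointer there, and a valid pointer
value is never smaller than sizeof(u64), while the final fragment stores only
its trailing padding (0..7 bytes). A hedged sketch of how a consumer walks
such a chain (loop shape only; the in-tree output path also honours
per-fragment copy callbacks):

	/* Sketch: emit every fragment, then skip the final padding. */
	static void copy_raw_frags(struct perf_output_handle *handle,
				   struct perf_raw_frag *frag)
	{
		do {
			__output_copy(handle, frag->data, frag->size);
			if (perf_raw_frag_last(frag))
				break;
			frag = frag->next;
		} while (1);
		if (frag->pad)
			__output_skip(handle, NULL, frag->pad);
	}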
- /*
- * This has to have a higher priority than migration_notifier in sched/core.c.
- */
- #define perf_cpu_notifier(fn) \
- do { \
- static struct notifier_block fn##_nb = \
- { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
- unsigned long cpu = smp_processor_id(); \
- unsigned long flags; \
- \
- cpu_notifier_register_begin(); \
- fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
- (void *)(unsigned long)cpu); \
- local_irq_save(flags); \
- fn(&fn##_nb, (unsigned long)CPU_STARTING, \
- (void *)(unsigned long)cpu); \
- local_irq_restore(flags); \
- fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
- (void *)(unsigned long)cpu); \
- __register_cpu_notifier(&fn##_nb); \
- cpu_notifier_register_done(); \
- } while (0)
-
- /*
- * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
- * callback for already online CPUs.
- */
- #define __perf_cpu_notifier(fn) \
- do { \
- static struct notifier_block fn##_nb = \
- { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
- \
- __register_cpu_notifier(&fn##_nb); \
- } while (0)
-
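
Callers of the removed macros migrate to cpuhp_setup_state(), which keeps the
one property the big macro existed for: the startup callback is also invoked
for CPUs that are already online at registration time. A sketch of the
replacement pattern, using a dynamically allocated online state (the callback
is a placeholder):

	/* Sketch: my_pmu_online_cpu is a hypothetical per-CPU setup hook. */
	static int my_pmu_online_cpu(unsigned int cpu)
	{
		/* work formerly done from the CPU_ONLINE notifier */
		return 0;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "perf/example:online",
				my_pmu_online_cpu, NULL);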
struct perf_pmu_events_attr {
struct device_attribute attr;
u64 id;
/* as we're called from CPU_ONLINE, the following shouldn't fail */
for_each_pool_worker(worker, pool)
- WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
- pool->attrs->cpumask) < 0);
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
}
- /*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
- static int workqueue_cpu_up_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+ int workqueue_prepare_cpu(unsigned int cpu)
+ {
+ struct worker_pool *pool;
+
+ for_each_cpu_worker_pool(pool, cpu) {
+ if (pool->nr_workers)
+ continue;
+ if (!create_worker(pool))
+ return -ENOMEM;
+ }
+ return 0;
+ }
+
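
These entry points are not registered from workqueue code at all; as built-in
hotplug states they are wired into the state table in kernel/cpu.c. A sketch
of the two table entries, assuming the plain .startup/.teardown fields of the
original conversion (later kernels renamed them to .startup.single and
.teardown.single):

	/* Sketch: built-in state table entries, field names as assumed above. */
	[CPUHP_WORKQUEUE_PREP] = {
		.name		= "workqueue prepare",
		.startup	= workqueue_prepare_cpu,
		.teardown	= NULL,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name		= "workqueue online",
		.startup	= workqueue_online_cpu,
		.teardown	= workqueue_offline_cpu,
	},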
+ int workqueue_online_cpu(unsigned int cpu)
{
- int cpu = (unsigned long)hcpu;
struct worker_pool *pool;
struct workqueue_struct *wq;
int pi;