[CELL] oprofile: add support to OProfile for profiling CELL BE SPUs

diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index f6f0c6b07c4cf335e41357def28b7388bf07d793..727a6699f2f41c36ffaac9a205a1aaf3405ec554 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -77,9 +77,8 @@
 /* keep track of when we need to update the rtc */
 time_t last_rtc_update;
 #ifdef CONFIG_PPC_ISERIES
-unsigned long iSeries_recal_titan = 0;
-unsigned long iSeries_recal_tb = 0; 
-static unsigned long first_settimeofday = 1;
+static unsigned long __initdata iSeries_recal_titan;
+static signed long __initdata iSeries_recal_tb;
 #endif
 
 /* The decrementer counts down by 128 every 128ns on a 601. */
@@ -113,8 +112,9 @@ u64 ticklen_to_xs;  /* 0.64 fraction */
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL_GPL(rtc_lock);
 
-u64 tb_to_ns_scale;
-unsigned tb_to_ns_shift;
+static u64 tb_to_ns_scale __read_mostly;
+static unsigned tb_to_ns_shift __read_mostly;
+static unsigned long boot_tb __read_mostly;
 
 struct gettimeofday_struct do_gtod;
 
@@ -122,6 +122,7 @@ extern struct timezone sys_tz;
 static long timezone_offset;
 
 unsigned long ppc_proc_freq;
+EXPORT_SYMBOL(ppc_proc_freq);
 unsigned long ppc_tb_freq;
 
 static u64 tb_last_jiffy __cacheline_aligned_in_smp;
@@ -214,7 +215,6 @@ static void account_process_time(struct pt_regs *regs)
        run_posix_cpu_timers(current);
 }
 
-#ifdef CONFIG_PPC_SPLPAR
 /*
  * Stuff for accounting stolen time.
  */
@@ -222,19 +222,28 @@ struct cpu_purr_data {
        int     initialized;                    /* thread is running */
        u64     tb;                     /* last TB value read */
        u64     purr;                   /* last PURR value read */
-       spinlock_t lock;
 };
 
+/*
+ * Each entry in the cpu_purr_data array is manipulated only by its
+ * "owner" cpu -- usually in the timer interrupt but also occasionally
+ * in process context for cpu online.  As long as cpus do not touch
+ * each other's cpu_purr_data, disabling local interrupts is
+ * sufficient to serialize accesses.
+ */
 static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
 
 static void snapshot_tb_and_purr(void *data)
 {
+       unsigned long flags;
        struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
 
+       local_irq_save(flags);
        p->tb = mftb();
        p->purr = mfspr(SPRN_PURR);
        wmb();
        p->initialized = 1;
+       local_irq_restore(flags);
 }
 
 /*
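
The hunk above trades the per-entry spinlock for the ownership rule spelled out in the new comment: each cpu_purr_data slot has exactly one writer, its owning cpu, so masking local interrupts is all the serialization needed. A minimal, hypothetical kernel-module sketch of that owner-only discipline (struct snap, snap_data and update_own_slot are made-up names; the four-argument on_each_cpu() matches this kernel generation):

/*
 * Hypothetical sketch (not part of the patch): owner-only per-cpu updates
 * serialized purely by disabling local interrupts, per the comment above.
 */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/jiffies.h>
#include <linux/irqflags.h>
#include <linux/smp.h>

struct snap {
	u64 stamp;
	u64 count;
};
static DEFINE_PER_CPU(struct snap, snap_data);

static void update_own_slot(void *unused)
{
	unsigned long flags;
	struct snap *s;

	local_irq_save(flags);		/* fend off this cpu's timer tick */
	s = &__get_cpu_var(snap_data);	/* our slot: no other cpu writes it */
	s->stamp = get_jiffies_64();
	s->count++;
	local_irq_restore(flags);
}

static int __init snap_init(void)
{
	/* run on every cpu and wait, as snapshot_timebases() does below */
	on_each_cpu(update_own_slot, NULL, 0, 1);
	return 0;
}
module_init(snap_init);
MODULE_LICENSE("GPL");
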
@@ -242,15 +251,14 @@ static void snapshot_tb_and_purr(void *data)
  */
 void snapshot_timebases(void)
 {
-       int cpu;
-
        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
-       for_each_possible_cpu(cpu)
-               spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
        on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
 }
 
+/*
+ * Must be called with interrupts disabled.
+ */
 void calculate_steal_time(void)
 {
        u64 tb, purr;
@@ -262,7 +270,6 @@ void calculate_steal_time(void)
        pme = &per_cpu(cpu_purr_data, smp_processor_id());
        if (!pme->initialized)
                return;         /* this can happen in early boot */
-       spin_lock(&pme->lock);
        tb = mftb();
        purr = mfspr(SPRN_PURR);
        stolen = (tb - pme->tb) - (purr - pme->purr);
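
With the lock gone, the new comment makes calculate_steal_time() rely on its callers keeping interrupts disabled; the arithmetic itself is unchanged. The timebase (TB) advances in wall-clock time while the PURR credits only the cycles on which this thread was actually dispatched, so the shortfall over any window is stolen time. A standalone worked example with made-up snapshot values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tb_prev = 1000, purr_prev = 1000;	/* last snapshot */
	uint64_t tb = 5000, purr = 3500;		/* current reads */

	/* TB ticks in wall-clock time; PURR only while we are dispatched,
	 * so the shortfall is time the hypervisor gave to someone else */
	int64_t stolen = (int64_t)((tb - tb_prev) - (purr - purr_prev));

	printf("stolen: %lld ticks\n", (long long)stolen);	/* 1500 */
	return 0;
}
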
@@ -270,9 +277,9 @@ void calculate_steal_time(void)
                account_steal_time(current, stolen);
        pme->tb = tb;
        pme->purr = purr;
-       spin_unlock(&pme->lock);
 }
 
+#ifdef CONFIG_PPC_SPLPAR
 /*
  * Must be called before the cpu is added to the online map when
  * a cpu is being brought up at runtime.
@@ -284,12 +291,12 @@ static void snapshot_purr(void)
 
        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
+       local_irq_save(flags);
        pme = &per_cpu(cpu_purr_data, smp_processor_id());
-       spin_lock_irqsave(&pme->lock, flags);
        pme->tb = mftb();
        pme->purr = mfspr(SPRN_PURR);
        pme->initialized = 1;
-       spin_unlock_irqrestore(&pme->lock, flags);
+       local_irq_restore(flags);
 }
 
 #endif /* CONFIG_PPC_SPLPAR */
@@ -550,10 +557,15 @@ EXPORT_SYMBOL(profile_pc);
  * returned by the service processor for the timebase frequency.  
  */
 
-static void iSeries_tb_recal(void)
+static int __init iSeries_tb_recal(void)
 {
        struct div_result divres;
        unsigned long titan, tb;
+
+       /* Make sure we only run on iSeries */
+       if (!firmware_has_feature(FW_FEATURE_ISERIES))
+               return -ENODEV;
+
        tb = get_tb();
        titan = HvCallXm_loadTod();
        if ( iSeries_recal_titan ) {
@@ -594,8 +606,18 @@ static void iSeries_tb_recal(void)
        }
        iSeries_recal_titan = titan;
        iSeries_recal_tb = tb;
+
+       return 0;
 }
-#endif
+late_initcall(iSeries_tb_recal);
+
+/* Called from platform early init */
+void __init iSeries_time_init_early(void)
+{
+       iSeries_recal_tb = get_tb();
+       iSeries_recal_titan = HvCallXm_loadTod();
+}
+#endif /* CONFIG_PPC_ISERIES */
 
 /*
  * For iSeries shared processors, we have to let the hypervisor
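
The recalibration moves from the first settimeofday() call to a late_initcall, with iSeries_time_init_early() taking the baseline snapshot early in boot so that a long interval has elapsed by the time the initcall runs. The idea, sketched below with made-up numbers (the real code also rescales the derived conversion factors): compare how far the timebase advanced against the hypervisor's independent Titan time-of-day clock over the same interval, and rederive ticks per second from the ratio.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* deltas since the iSeries_time_init_early() snapshot (made up) */
	uint64_t tb_ticks = 1024123456;		/* timebase ticks elapsed */
	uint64_t titan_usec = 2000000;		/* 2 s on the Titan clock */

	uint64_t new_tb_ticks_per_sec = tb_ticks * 1000000 / titan_usec;

	printf("%llu ticks/sec\n", (unsigned long long)new_tb_ticks_per_sec);
	return 0;
}
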
@@ -711,30 +733,15 @@ void wakeup_decrementer(void)
 void __init smp_space_timers(unsigned int max_cpus)
 {
        int i;
-       unsigned long half = tb_ticks_per_jiffy / 2;
-       unsigned long offset = tb_ticks_per_jiffy / max_cpus;
        u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
 
        /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
        previous_tb -= tb_ticks_per_jiffy;
-       /*
-        * The stolen time calculation for POWER5 shared-processor LPAR
-        * systems works better if the two threads' timebase interrupts
-        * are staggered by half a jiffy with respect to each other.
-        */
+
        for_each_possible_cpu(i) {
                if (i == boot_cpuid)
                        continue;
-               if (i == (boot_cpuid ^ 1))
-                       per_cpu(last_jiffy, i) =
-                               per_cpu(last_jiffy, boot_cpuid) - half;
-               else if (i & 1)
-                       per_cpu(last_jiffy, i) =
-                               per_cpu(last_jiffy, i ^ 1) + half;
-               else {
-                       previous_tb += offset;
-                       per_cpu(last_jiffy, i) = previous_tb;
-               }
+               per_cpu(last_jiffy, i) = previous_tb;
        }
 }
 #endif
@@ -750,7 +757,7 @@ unsigned long long sched_clock(void)
 {
        if (__USE_RTC())
                return get_rtc();
-       return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
+       return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
 }
 
 int do_settimeofday(struct timespec *tv)
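
sched_clock() now counts from boot: boot_tb (saved in time_init() at the end of this diff) is subtracted before the ticks-to-nanoseconds conversion. mulhdu() yields the high 64 bits of a 64x64-bit multiply, so tb_to_ns_scale acts as a 0.64-bit fraction. A userspace model, assuming GCC's unsigned __int128 and a hypothetical 512 MHz timebase (for which the scale is 0.9765625 with a shift of 1):

#include <stdint.h>
#include <stdio.h>

/* model of the ppc mulhdu instruction: high 64 bits of a 64x64 multiply */
static uint64_t mulhdu(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

int main(void)
{
	/* for a hypothetical 512 MHz timebase, 1e9/512e6 = 1.953125 ns/tick,
	 * stored as scale = 0.9765625 (a 0.64 fraction) with shift = 1 */
	uint64_t tb_to_ns_scale = 125ULL << 57;
	unsigned tb_to_ns_shift = 1;

	uint64_t tb_delta = 512000000;	/* one second's worth of ticks */
	uint64_t ns = mulhdu(tb_delta, tb_to_ns_scale) << tb_to_ns_shift;

	printf("%llu ns\n", (unsigned long long)ns);	/* 1000000000 */
	return 0;
}
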
@@ -774,12 +781,6 @@ int do_settimeofday(struct timespec *tv)
         * to the RTC again, or write to the RTC but then they don't call
         * settimeofday to perform this operation.
         */
-#ifdef CONFIG_PPC_ISERIES
-       if (firmware_has_feature(FW_FEATURE_ISERIES) && first_settimeofday) {
-               iSeries_tb_recal();
-               first_settimeofday = 0;
-       }
-#endif
 
        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
@@ -834,7 +835,7 @@ static int __init get_freq(char *name, int cells, unsigned long *val)
        cpu = of_find_node_by_type(NULL, "cpu");
 
        if (cpu) {
-               fp = get_property(cpu, name, NULL);
+               fp = of_get_property(cpu, name, NULL);
                if (fp) {
                        found = 1;
                        *val = of_read_ulong(fp, cells);
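
get_property() is simply renamed to of_get_property() here; it still returns a pointer to the property value, or NULL, with an optional length out-parameter. A sketch of the same lookup pattern as get_freq(), reading the standard "timebase-frequency" property (the function name read_timebase_freq is made up; the single-cell assumption holds for frequencies under 4 GHz):

#include <asm/prom.h>

/* sketch: fetch the timebase frequency from the device tree */
static unsigned long __init read_timebase_freq(void)
{
	struct device_node *cpu;
	const unsigned int *fp;
	unsigned long freq = 0;

	cpu = of_find_node_by_type(NULL, "cpu");
	if (cpu) {
		fp = of_get_property(cpu, "timebase-frequency", NULL);
		if (fp)
			freq = of_read_ulong(fp, 1);	/* one cell */
		of_node_put(cpu);	/* balance the node reference */
	}
	return freq;
}
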
@@ -975,6 +976,8 @@ void __init time_init(void)
        }
        tb_to_ns_scale = scale;
        tb_to_ns_shift = shift;
+       /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
+       boot_tb = get_tb();
 
        tm = get_boot_time();
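
For completeness: the scale/shift pair stored just above is computed in time_init() as 1e9 * 2^64 / tb_ticks_per_sec, a 64.64 fixed-point nanoseconds-per-tick value (via the kernel's div128_by_32() helper), then shifted right until the integer part is gone so that sched_clock() can treat the scale as a pure 0.64 fraction. A userspace sketch with unsigned __int128 standing in for the 128-bit divide; the 512 MHz frequency is made up and yields the scale/shift pair used in the sched_clock() example earlier:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tb_ticks_per_sec = 512000000;	/* hypothetical timebase freq */

	/* 1e9 * 2^64 / freq: ns per tick as a 64.64 fixed-point number */
	unsigned __int128 res = ((unsigned __int128)1000000000 << 64)
				/ tb_ticks_per_sec;
	unsigned shift = 0;

	/* fold the integer part into a shift so the scale is a 0.64 fraction */
	while (res >> 64) {
		res >>= 1;
		shift++;
	}

	printf("scale=%#llx shift=%u\n",
	       (unsigned long long)(uint64_t)res, shift);
	return 0;
}
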