Merge branch 'consolidate-clksrc-i8253' of master.kernel.org:~rmk/linux-2.6-arm into...
author Thomas Gleixner <tglx@linutronix.de>
Sat, 14 May 2011 10:06:36 +0000 (12:06 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Sat, 14 May 2011 10:06:36 +0000 (12:06 +0200)
Conflicts:
arch/ia64/kernel/cyclone.c
arch/mips/kernel/i8253.c
arch/x86/kernel/i8253.c

Reason: Resolve the conflicts so that further cleanups do not conflict even more

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/alpha/kernel/time.c
arch/blackfin/kernel/time-ts.c
arch/ia64/kernel/time.c
arch/microblaze/kernel/timer.c
arch/mips/sgi-ip27/ip27-timer.c
arch/x86/kernel/apb_timer.c
arch/x86/kernel/i8253.c
arch/x86/lguest/boot.c
arch/x86/xen/time.c
include/linux/clocksource.h

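Nearly every hunk below follows the same conversion: instead of computing .mult/.shift by hand and calling clocksource_register(), drivers now pass the raw counter frequency to clocksource_register_hz() (or clocksource_register_khz()) and let the timekeeping core pick the scaling factors. A minimal sketch of the converted shape, with hypothetical names and an assumed 50 MHz counter:

  #include <linux/clocksource.h>

  static cycle_t example_read(struct clocksource *cs)
  {
  	return (cycle_t)example_read_hw_counter();	/* hypothetical HW read */
  }

  static struct clocksource example_cs = {
  	.name	= "example",
  	.rating	= 300,
  	.read	= example_read,
  	.mask	= CLOCKSOURCE_MASK(32),
  	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
  	/* no .mult/.shift: the core derives them from the frequency */
  };

  static int __init example_cs_init(void)
  {
  	/* was: cs.mult = clocksource_hz2mult(freq, cs.shift);
  	 *      clocksource_register(&cs);                      */
  	return clocksource_register_hz(&example_cs, 50000000);
  }
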
diff --combined arch/alpha/kernel/time.c
index 33b81108dc836f6bfbbe93b09a4a29f28f91ef4b,918e8e0b72ff1e3220845ea410277a83ff326517..818e74ed45dc01bbc0eaef0eb60b2ccbf8c137e9
@@@ -153,13 -153,14 +153,14 @@@ void read_persistent_clock(struct times
                year += 100;
  
        ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+       ts->tv_nsec = 0;
  }
  
  
  
  /*
   * timer_interrupt() needs to keep up the real-time clock,
-  * as well as call the "do_timer()" routine every clocktick
+  * as well as call the "xtime_update()" routine every clocktick
   */
  irqreturn_t timer_interrupt(int irq, void *dev)
  {
        profile_tick(CPU_PROFILING);
  #endif
  
-       write_seqlock(&xtime_lock);
        /*
         * Calculate how many ticks have passed since the last update,
         * including any previous partial leftover.  Save any resulting
        nticks = delta >> FIX_SHIFT;
  
        if (nticks)
-               do_timer(nticks);
-       write_sequnlock(&xtime_lock);
+               xtime_update(nticks);
  
        if (test_irq_work_pending()) {
                clear_irq_work_pending();
@@@ -378,7 -375,8 +375,7 @@@ static struct clocksource clocksource_r
  
  static inline void register_rpcc_clocksource(long cycle_freq)
  {
 -      clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4);
 -      clocksource_register(&clocksource_rpcc);
 +      clocksource_register_hz(&clocksource_rpcc, cycle_freq);
  }
  #else /* !CONFIG_SMP */
  static inline void register_rpcc_clocksource(long cycle_freq)
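
Besides the registration change, the alpha hunk drops the explicit xtime_lock handling: xtime_update() now wraps the old write_seqlock(&xtime_lock); do_timer(nticks); write_sequnlock(&xtime_lock); sequence inside the timekeeping core. A sketch of the resulting handler shape, assuming a hypothetical example_pending_ticks() helper:

  irqreturn_t example_timer_interrupt(int irq, void *dev)
  {
  	unsigned long nticks = example_pending_ticks();	/* hypothetical */

  	if (nticks)
  		xtime_update(nticks);	/* serializes on xtime_lock internally */

  	return IRQ_HANDLED;
  }
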
diff --combined arch/blackfin/kernel/time-ts.c
index 4a013714500bd4195e9310135e99d56ac7955dd0,cdb4beb6bc8fc9507bf0531317b8b834eca9d4c8..9e9b60d969dcc468b00dd2ec2fd9227f1377feea
  #include <asm/gptimers.h>
  #include <asm/nmi.h>
  
 -/* Accelerators for sched_clock()
 - * convert from cycles(64bits) => nanoseconds (64bits)
 - *  basic equation:
 - *            ns = cycles / (freq / ns_per_sec)
 - *            ns = cycles * (ns_per_sec / freq)
 - *            ns = cycles * (10^9 / (cpu_khz * 10^3))
 - *            ns = cycles * (10^6 / cpu_khz)
 - *
 - *    Then we use scaling math (suggested by george@mvista.com) to get:
 - *            ns = cycles * (10^6 * SC / cpu_khz) / SC
 - *            ns = cycles * cyc2ns_scale / SC
 - *
 - *    And since SC is a constant power of two, we can convert the div
 - *  into a shift.
 - *
 - *  We can use khz divisor instead of mhz to keep a better precision, since
 - *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 - *  (mathieu.desnoyers@polymtl.ca)
 - *
 - *                    -johnstul@us.ibm.com "math is hard, lets go shopping!"
 - */
 -
 -#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
  
  #if defined(CONFIG_CYCLES_CLOCKSOURCE)
  
@@@ -40,6 -63,7 +40,6 @@@ static struct clocksource bfin_cs_cycle
        .rating         = 400,
        .read           = bfin_read_cycles,
        .mask           = CLOCKSOURCE_MASK(64),
 -      .shift          = CYC2NS_SCALE_FACTOR,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
  };
  
@@@ -51,7 -75,10 +51,7 @@@ static inline unsigned long long bfin_c
  
  static int __init bfin_cs_cycles_init(void)
  {
 -      bfin_cs_cycles.mult = \
 -              clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);
 -
 -      if (clocksource_register(&bfin_cs_cycles))
 +      if (clocksource_register_hz(&bfin_cs_cycles, get_cclk()))
                panic("failed to register clocksource");
  
        return 0;
@@@ -84,6 -111,7 +84,6 @@@ static struct clocksource bfin_cs_gptim
        .rating         = 350,
        .read           = bfin_read_gptimer0,
        .mask           = CLOCKSOURCE_MASK(32),
 -      .shift          = CYC2NS_SCALE_FACTOR,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
  };
  
@@@ -97,7 -125,10 +97,7 @@@ static int __init bfin_cs_gptimer0_init
  {
        setup_gptimer0();
  
 -      bfin_cs_gptimer0.mult = \
 -              clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift);
 -
 -      if (clocksource_register(&bfin_cs_gptimer0))
 +      if (clocksource_register_hz(&bfin_cs_gptimer0, get_sclk()))
                panic("failed to register clocksource");
  
        return 0;
@@@ -175,8 -206,14 +175,14 @@@ irqreturn_t bfin_gptmr0_interrupt(int i
  {
        struct clock_event_device *evt = dev_id;
        smp_mb();
-       evt->event_handler(evt);
+       /*
+        * We want to ACK before we handle so that we can handle smaller timer
+        * intervals.  This way if the timer expires again while we're handling
+        * things, we're more likely to see that 2nd int rather than swallowing
+        * it by ACKing the int at the end of this handler.
+        */
        bfin_gptmr0_ack();
+       evt->event_handler(evt);
        return IRQ_HANDLED;
  }
  
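The block comment deleted at the top of this file derived the scaled conversion ns = cycles * (10^6 * 2^10 / cpu_khz) / 2^10; with clocksource_register_hz() that math becomes the core's job. Once the core has chosen mult and shift, readers convert along the lines of (this mirrors clocksource_cyc2ns() from clocksource.h):

  /* ns = cycles * (NSEC_PER_SEC / freq), folded into a multiply and a
   * shift so the hot path never divides. */
  static inline u64 example_cyc2ns(u64 cycles, u32 mult, u32 shift)
  {
  	return (cycles * mult) >> shift;
  }
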
diff --combined arch/ia64/kernel/time.c
index 41c40f0e4796fea795e42c571ce370e4f55d82cf,156ad803d5b7aedce2b059545cf1bde0cd44d32e..04440cc09b40cbd2a124b78474f6bba30a26d0b2
@@@ -73,6 -73,8 +73,6 @@@ static struct clocksource clocksource_i
        .rating         = 350,
        .read           = itc_get_cycles,
        .mask           = CLOCKSOURCE_MASK(64),
 -      .mult           = 0, /*to be calculated*/
 -      .shift          = 16,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
  #ifdef CONFIG_PARAVIRT
        .resume         = paravirt_clocksource_resume,
@@@ -188,19 -190,10 +188,10 @@@ timer_interrupt (int irq, void *dev_id
  
                new_itm += local_cpu_data->itm_delta;
  
-               if (smp_processor_id() == time_keeper_id) {
-                       /*
-                        * Here we are in the timer irq handler. We have irqs locally
-                        * disabled, but we don't know if the timer_bh is running on
-                        * another CPU. We need to avoid to SMP race by acquiring the
-                        * xtime_lock.
-                        */
-                       write_seqlock(&xtime_lock);
-                       do_timer(1);
-                       local_cpu_data->itm_next = new_itm;
-                       write_sequnlock(&xtime_lock);
-               } else
-                       local_cpu_data->itm_next = new_itm;
+               if (smp_processor_id() == time_keeper_id)
+                       xtime_update(1);
+               local_cpu_data->itm_next = new_itm;
  
                if (time_after(new_itm, ia64_get_itc()))
                        break;
@@@ -220,7 -213,7 +211,7 @@@ skip_process_time_accounting
                 * comfort, we increase the safety margin by
                 * intentionally dropping the next tick(s).  We do NOT
                 * update itm.next because that would force us to call
-                * do_timer() which in turn would let our clock run
+                * xtime_update() which in turn would let our clock run
                 * too fast (with the potentially devastating effect
                 * of losing monotony of time).
                 */
@@@ -372,8 -365,11 +363,8 @@@ ia64_init_itm (void
        ia64_cpu_local_tick();
  
        if (!itc_clocksource) {
 -              /* Sort out mult/shift values: */
 -              clocksource_itc.mult =
 -                      clocksource_hz2mult(local_cpu_data->itc_freq,
 -                                              clocksource_itc.shift);
 -              clocksource_register(&clocksource_itc);
 +              clocksource_register_hz(&clocksource_itc,
 +                                              local_cpu_data->itc_freq);
                itc_clocksource = &clocksource_itc;
        }
  }
diff --combined arch/microblaze/kernel/timer.c
index 68ec7d1e84391c4d733c6faadd03670ff46ac2d4,d8a214f11ac29d2f8569d972f989a97e7a7b6fd3..e5550ce4e0eb783f5e6a4f29e7526c1cf3cbc6b0
@@@ -38,8 -38,8 +38,8 @@@ static unsigned int timer_baseaddr
  #define TIMER_BASE    timer_baseaddr
  #endif
  
- unsigned int freq_div_hz;
- unsigned int timer_clock_freq;
static unsigned int freq_div_hz;
static unsigned int timer_clock_freq;
  
  #define TCSR0 (0x00)
  #define TLR0  (0x04)
@@@ -202,7 -202,7 +202,7 @@@ static struct cyclecounter microblaze_c
        .shift = 8,
  };
  
- int __init init_microblaze_timecounter(void)
static int __init init_microblaze_timecounter(void)
  {
        microblaze_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC,
                                microblaze_cc.shift);
@@@ -217,12 -217,16 +217,12 @@@ static struct clocksource clocksource_m
        .rating         = 300,
        .read           = microblaze_read,
        .mask           = CLOCKSOURCE_MASK(32),
 -      .shift          = 8, /* I can shift it */
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
  };
  
  static int __init microblaze_clocksource_init(void)
  {
 -      clocksource_microblaze.mult =
 -                      clocksource_hz2mult(timer_clock_freq,
 -                                              clocksource_microblaze.shift);
 -      if (clocksource_register(&clocksource_microblaze))
 +      if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq))
                panic("failed to register clocksource");
  
        /* stop timer1 */
diff --combined arch/mips/sgi-ip27/ip27-timer.c
index 3cac88382d4c12dea1fa4af8ff15ce4698d84d2b,a152538d3c9744cb27f40a0e18d5a69eeb94c91c..8d0d2690e962d807f9ecb44246f0eeb11f6a263b
  #include <asm/sn/sn0/hubio.h>
  #include <asm/pci/bridge.h>
  
- static void enable_rt_irq(unsigned int irq)
+ static void enable_rt_irq(struct irq_data *d)
  {
  }
  
- static void disable_rt_irq(unsigned int irq)
+ static void disable_rt_irq(struct irq_data *d)
  {
  }
  
  static struct irq_chip rt_irq_type = {
        .name           = "SN HUB RT timer",
-       .ack            = disable_rt_irq,
-       .mask           = disable_rt_irq,
-       .mask_ack       = disable_rt_irq,
-       .unmask         = enable_rt_irq,
-       .eoi            = enable_rt_irq,
+       .irq_mask       = disable_rt_irq,
+       .irq_unmask     = enable_rt_irq,
  };
  
  static int rt_next_event(unsigned long delta, struct clock_event_device *evt)
@@@ -156,7 -153,7 +153,7 @@@ static void __init hub_rt_clock_event_g
                        panic("Allocation of irq number for timer failed");
        } while (xchg(&rt_timer_irq, irq));
  
-       set_irq_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq);
+       irq_set_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq);
        setup_irq(irq, &hub_rt_irqaction);
  }
  
@@@ -177,7 -174,8 +174,7 @@@ static void __init hub_rt_clocksource_i
  {
        struct clocksource *cs = &hub_rt_clocksource;
  
 -      clocksource_set_clock(cs, CYCLES_PER_SEC);
 -      clocksource_register(cs);
 +      clocksource_register_hz(cs, CYCLES_PER_SEC);
  }
  
  void __init plat_time_init(void)
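
The ip27 hunks also carry the genirq conversion that runs through this merge: chip callbacks take a struct irq_data * instead of a bare irq number, and set_irq_chip_and_handler() becomes irq_set_chip_and_handler(). A sketch of the new-style chip (the ip27 callbacks themselves are empty stubs; the example names are hypothetical):

  #include <linux/irq.h>

  static void example_irq_mask(struct irq_data *d)
  {
  	/* mask the hardware source for d->irq */
  }

  static void example_irq_unmask(struct irq_data *d)
  {
  	/* unmask the hardware source for d->irq */
  }

  static struct irq_chip example_irq_chip = {
  	.name		= "example",
  	.irq_mask	= example_irq_mask,
  	.irq_unmask	= example_irq_unmask,
  };

  static void example_setup_irq(unsigned int irq)
  {
  	/* was: set_irq_chip_and_handler(irq, &example_irq_chip, ...); */
  	irq_set_chip_and_handler(irq, &example_irq_chip, handle_percpu_irq);
  }
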
diff --combined arch/x86/kernel/apb_timer.c
index 29ebf5a3b1921072fa83b9fbe9ba472519c781cb,cd1ffed4ee2226bd2ec6381c7c5de67bc7c13348..289e92862fd97aac3534eca0048cd646abb60860
@@@ -177,6 -177,7 +177,6 @@@ static struct clocksource clocksource_a
        .rating         = APBT_CLOCKSOURCE_RATING,
        .read           = apbt_read_clocksource,
        .mask           = APBT_MASK,
 -      .shift          = APBT_SHIFT,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
        .resume         = apbt_restart_clocksource,
  };
@@@ -283,7 -284,7 +283,7 @@@ static int __init apbt_clockevent_regis
        memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
  
        if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
-               apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
+               adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
                global_clock_event = &adev->evt;
                printk(KERN_DEBUG "%s clockevent registered as global\n",
                       global_clock_event->name);
@@@ -315,7 -316,7 +315,7 @@@ static void apbt_setup_irq(struct apbt_
        irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
        irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
        /* APB timer irqs are set up as mp_irqs, timer is edge type */
-       __set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
+       __irq_set_handler(adev->irq, handle_edge_irq, 0, "edge");
  
        if (system_state == SYSTEM_BOOTING) {
                if (request_irq(adev->irq, apbt_interrupt_handler,
@@@ -507,64 -508,12 +507,12 @@@ static int apbt_next_event(unsigned lon
        return 0;
  }
  
- /*
-  * APB timer clock is not in sync with pclk on Langwell, which translates to
-  * unreliable read value caused by sampling error. the error does not add up
-  * overtime and only happens when sampling a 0 as a 1 by mistake. so the time
-  * would go backwards. the following code is trying to prevent time traveling
-  * backwards. little bit paranoid.
-  */
  static cycle_t apbt_read_clocksource(struct clocksource *cs)
  {
-       unsigned long t0, t1, t2;
-       static unsigned long last_read;
- bad_count:
-       t1 = apbt_readl(phy_cs_timer_id,
-                       APBTMR_N_CURRENT_VALUE);
-       t2 = apbt_readl(phy_cs_timer_id,
-                       APBTMR_N_CURRENT_VALUE);
-       if (unlikely(t1 < t2)) {
-               pr_debug("APBT: read current count error %lx:%lx:%lx\n",
-                        t1, t2, t2 - t1);
-               goto bad_count;
-       }
-       /*
-        * check against cached last read, makes sure time does not go back.
-        * it could be a normal rollover but we will do tripple check anyway
-        */
-       if (unlikely(t2 > last_read)) {
-               /* check if we have a normal rollover */
-               unsigned long raw_intr_status =
-                       apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
-               /*
-                * cs timer interrupt is masked but raw intr bit is set if
-                * rollover occurs. then we read EOI reg to clear it.
-                */
-               if (raw_intr_status & (1 << phy_cs_timer_id)) {
-                       apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
-                       goto out;
-               }
-               pr_debug("APB CS going back %lx:%lx:%lx ",
-                        t2, last_read, t2 - last_read);
- bad_count_x3:
-               pr_debug("triple check enforced\n");
-               t0 = apbt_readl(phy_cs_timer_id,
-                               APBTMR_N_CURRENT_VALUE);
-               udelay(1);
-               t1 = apbt_readl(phy_cs_timer_id,
-                               APBTMR_N_CURRENT_VALUE);
-               udelay(1);
-               t2 = apbt_readl(phy_cs_timer_id,
-                               APBTMR_N_CURRENT_VALUE);
-               if ((t2 > t1) || (t1 > t0)) {
-                       printk(KERN_ERR "Error: APB CS tripple check failed\n");
-                       goto bad_count_x3;
-               }
-       }
- out:
-       last_read = t2;
-       return (cycle_t)~t2;
+       unsigned long current_count;
+       current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);
+       return (cycle_t)~current_count;
  }
  
  static int apbt_clocksource_register(void)
        if (t1 == apbt_read_clocksource(&clocksource_apbt))
                panic("APBT counter not counting. APBT disabled\n");
  
 -      /*
 -       * initialize and register APBT clocksource
 -       * convert that to ns/clock cycle
 -       * mult = (ns/c) * 2^APBT_SHIFT
 -       */
 -      clocksource_apbt.mult = div_sc(MSEC_PER_SEC,
 -                                     (unsigned long) apbt_freq, APBT_SHIFT);
 -      clocksource_register(&clocksource_apbt);
 +      clocksource_register_khz(&clocksource_apbt, (u32)apbt_freq*1000);
  
        return 0;
  }
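
One subtlety in the rating hunk above: apbt_clockevent is a template that is memcpy()'d into each per-cpu adev->evt, so the LAPIC_APBT demotion must be applied to the copy, not the template, or every later copy would inherit the lowered rating. The corrected pattern, as the hunk shows:

  memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
  if (mrst_timer_options == MRST_TIMER_LAPIC_APBT)
  	adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;	/* adjust the copy */
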
diff --combined arch/x86/kernel/i8253.c
index 212fe6590aab9d5f87aa6187c7717ef14759db64,b904dfbf6dbc4d4e1a5c202d60b9d3301c2c5088..577e90cadaebbd4acb68d7b3065f9f5f6d9eb7e6
@@@ -117,79 -117,6 +117,6 @@@ void __init setup_pit_timer(void
  }
  
  #ifndef CONFIG_X86_64
- /*
-  * Since the PIT overflows every tick, its not very useful
-  * to just read by itself. So use jiffies to emulate a free
-  * running counter:
-  */
- static cycle_t pit_read(struct clocksource *cs)
- {
-       static int old_count;
-       static u32 old_jifs;
-       unsigned long flags;
-       int count;
-       u32 jifs;
-       raw_spin_lock_irqsave(&i8253_lock, flags);
-       /*
-        * Although our caller may have the read side of xtime_lock,
-        * this is now a seqlock, and we are cheating in this routine
-        * by having side effects on state that we cannot undo if
-        * there is a collision on the seqlock and our caller has to
-        * retry.  (Namely, old_jifs and old_count.)  So we must treat
-        * jiffies as volatile despite the lock.  We read jiffies
-        * before latching the timer count to guarantee that although
-        * the jiffies value might be older than the count (that is,
-        * the counter may underflow between the last point where
-        * jiffies was incremented and the point where we latch the
-        * count), it cannot be newer.
-        */
-       jifs = jiffies;
-       outb_pit(0x00, PIT_MODE);       /* latch the count ASAP */
-       count = inb_pit(PIT_CH0);       /* read the latched count */
-       count |= inb_pit(PIT_CH0) << 8;
-       /* VIA686a test code... reset the latch if count > max + 1 */
-       if (count > LATCH) {
-               outb_pit(0x34, PIT_MODE);
-               outb_pit(LATCH & 0xff, PIT_CH0);
-               outb_pit(LATCH >> 8, PIT_CH0);
-               count = LATCH - 1;
-       }
-       /*
-        * It's possible for count to appear to go the wrong way for a
-        * couple of reasons:
-        *
-        *  1. The timer counter underflows, but we haven't handled the
-        *     resulting interrupt and incremented jiffies yet.
-        *  2. Hardware problem with the timer, not giving us continuous time,
-        *     the counter does small "jumps" upwards on some Pentium systems,
-        *     (see c't 95/10 page 335 for Neptun bug.)
-        *
-        * Previous attempts to handle these cases intelligently were
-        * buggy, so we just do the simple thing now.
-        */
-       if (count > old_count && jifs == old_jifs)
-               count = old_count;
-       old_count = count;
-       old_jifs = jifs;
-       raw_spin_unlock_irqrestore(&i8253_lock, flags);
-       count = (LATCH - 1) - count;
-       return (cycle_t)(jifs * LATCH) + count;
- }
- static struct clocksource pit_cs = {
-       .name           = "pit",
-       .rating         = 110,
-       .read           = pit_read,
-       .mask           = CLOCKSOURCE_MASK(32),
- };
  static int __init init_pit_clocksource(void)
  {
         /*
            pit_ce.mode != CLOCK_EVT_MODE_PERIODIC)
                return 0;
  
-       return clocksource_register_hz(&pit_cs, CLOCK_TICK_RATE);
+       return clocksource_i8253_init();
  }
  arch_initcall(init_pit_clocksource);
--
  #endif /* !CONFIG_X86_64 */
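
The pit_read()/pit_cs code deleted here is not lost: clocksource_i8253_init(), declared in the clocksource.h hunk at the end of this diff, provides one shared copy for the architectures that drive a PIT (the conflict list above includes the MIPS and x86 copies). For reference, the 8254 access sequence it is built around, taken from the removed pit_read():

  outb_pit(0x00, PIT_MODE);	/* latch the count ASAP */
  count = inb_pit(PIT_CH0);	/* read the latched count */
  count |= inb_pit(PIT_CH0) << 8;
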
diff --combined arch/x86/lguest/boot.c
index 5b96fd95bdab2fcab3285f128d1367253bb588bc,1cd608973ce594f6e0853170ef3e7abc84425c82..4e0068ead6b46efdba0dc9d293f49a4f73ffdd55
@@@ -397,7 -397,7 +397,7 @@@ static void lguest_load_tr_desc(void
   * instead we just use the real "cpuid" instruction.  Then I pretty much turned
   * off feature bits until the Guest booted.  (Don't say that: you'll damage
   * lguest sales!)  Shut up, inner voice!  (Hey, just pointing out that this is
-  * hardly future proof.)  Noone's listening!  They don't like you anyway,
+  * hardly future proof.)  No one's listening!  They don't like you anyway,
   * parenthetic weirdo!
   *
   * Replacing the cpuid so we can turn features off is great for the kernel, but
@@@ -847,7 -847,7 +847,7 @@@ static void __init lguest_init_IRQ(void
  void lguest_setup_irq(unsigned int irq)
  {
        irq_alloc_desc_at(irq, 0);
-       set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
+       irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
                                      handle_level_irq, "level");
  }
  
@@@ -913,6 -913,8 +913,6 @@@ static struct clocksource lguest_clock 
        .rating         = 200,
        .read           = lguest_clock_read,
        .mask           = CLOCKSOURCE_MASK(64),
 -      .mult           = 1 << 22,
 -      .shift          = 22,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
  };
  
@@@ -993,9 -995,9 +993,9 @@@ static void lguest_time_irq(unsigned in
  static void lguest_time_init(void)
  {
        /* Set up the timer interrupt (0) to go to our simple timer routine */
-       set_irq_handler(0, lguest_time_irq);
+       irq_set_handler(0, lguest_time_irq);
  
 -      clocksource_register(&lguest_clock);
 +      clocksource_register_hz(&lguest_clock, NSEC_PER_SEC);
  
        /* We can't set cpumask in the initializer: damn C limitations!  Set it
         * here and register our timer device. */
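
lguest's clock counts nanoseconds directly, which the deleted fields expressed as the identity pair .mult = 1 << 22, .shift = 22. Registering with NSEC_PER_SEC lets the core arrive at an equivalent conversion on its own (the Xen hunk below does the same):

  /* A counter running at NSEC_PER_SEC (10^9 Hz) already counts
   * nanoseconds, so (cycles * mult) >> shift reduces to cycles. */
  clocksource_register_hz(&lguest_clock, NSEC_PER_SEC);
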
diff --combined arch/x86/xen/time.c
index 04e11597a8c55b12f7dc11abb286b265cb71f54a,2e2d370a47b1517bf1caa2503c04a96e1fa3b79a..c532d280ce3698a28d8d5bb034178b834dbe0dbb
@@@ -26,6 -26,8 +26,6 @@@
  
  #include "xen-ops.h"
  
 -#define XEN_SHIFT 22
 -
  /* Xen may fire a timer up to this many ns early */
  #define TIMER_SLOP    100000
  #define NS_PER_TICK   (1000000000LL / HZ)
@@@ -209,6 -211,8 +209,6 @@@ static struct clocksource xen_clocksour
        .rating = 400,
        .read = xen_clocksource_get_cycles,
        .mask = ~0,
 -      .mult = 1<<XEN_SHIFT,           /* time directly in nanoseconds */
 -      .shift = XEN_SHIFT,
        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
  };
  
@@@ -393,7 -397,9 +393,9 @@@ void xen_setup_timer(int cpu
                name = "<timer kasprintf failed>";
  
        irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
-                                     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
+                                     IRQF_DISABLED|IRQF_PERCPU|
+                                     IRQF_NOBALANCING|IRQF_TIMER|
+                                     IRQF_FORCE_RESUME,
                                      name, NULL);
  
        evt = &per_cpu(xen_clock_events, cpu);
@@@ -442,7 -448,7 +444,7 @@@ static __init void xen_time_init(void
        int cpu = smp_processor_id();
        struct timespec tp;
  
 -      clocksource_register(&xen_clocksource);
 +      clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);
  
        if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
                /* Successfully turned off 100Hz tick, so we have the
diff --combined include/linux/clocksource.h
index 94c1f38e922a9b10d83e54266599d2cc4265f9dd,f13469b3df86b308abca0c2c7966ff5cf4227fd9..0fb0b7e793947d2b2255119b54fea0662cb6b5c6
@@@ -161,7 -161,7 +161,7 @@@ struct clocksource 
        /*
         * First part of structure is read mostly
         */
 -      char *name;
 +      const char *name;
        struct list_head list;
        int rating;
        cycle_t (*read)(struct clocksource *cs);
@@@ -341,4 -341,6 +341,6 @@@ static inline void update_vsyscall_tz(v
  
  extern void timekeeping_notify(struct clocksource *clock);
  
+ extern int clocksource_i8253_init(void);
  #endif /* _LINUX_CLOCKSOURCE_H */