Commit 61743fe4 authored by John Stultz, committed by Linus Torvalds

[PATCH] Time: i386 Conversion - part 4: Remove Old timer_opts Code



Remove the old timers/timer_opts infrastructure, which has been disabled. It
is a fairly straightforward set of deletions.

Note that this does not provide any i386 clocksources, so you will only have
the jiffies clocksource.  To get full replacements for the code being removed
here, the timeofday-clocks-i386 patch will be needed.
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 6f84fa2f
arch/i386/kernel/timers/Makefile (deleted)

#
# Makefile for x86 timers
#
obj-y := timer.o timer_none.o timer_tsc.o timer_pit.o common.o
obj-$(CONFIG_X86_CYCLONE_TIMER) += timer_cyclone.o
obj-$(CONFIG_HPET_TIMER) += timer_hpet.o
obj-$(CONFIG_X86_PM_TIMER) += timer_pm.o
arch/i386/kernel/timers/common.c (deleted)

/*
 * Common functions used across the timers go here
 */
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/hpet.h>
#include "mach_timer.h"
/* ------ Calibrate the TSC -------
 * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
 * Too much 64-bit arithmetic here to do this cleanly in C, and for
 * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
 * output busy loop as low as possible. We avoid reading the CTC registers
 * directly because of the awkward 8-bit access mechanism of the 82C54
 * device.
 */
#define CALIBRATE_TIME  (5 * 1000020/HZ)
unsigned long calibrate_tsc(void)
{
        mach_prepare_counter();

        {
                unsigned long startlow, starthigh;
                unsigned long endlow, endhigh;
                unsigned long count;

                rdtsc(startlow, starthigh);
                mach_countup(&count);
                rdtsc(endlow, endhigh);

                /* Error: ECTCNEVERSET */
                if (count <= 1)
                        goto bad_ctc;

                /* 64-bit subtract - gcc just messes up with long longs */
                __asm__("subl %2,%0\n\t"
                        "sbbl %3,%1"
                        : "=a" (endlow), "=d" (endhigh)
                        : "g" (startlow), "g" (starthigh),
                          "0" (endlow), "1" (endhigh));

                /* Error: ECPUTOOFAST */
                if (endhigh)
                        goto bad_ctc;

                /* Error: ECPUTOOSLOW */
                if (endlow <= CALIBRATE_TIME)
                        goto bad_ctc;

                __asm__("divl %2"
                        : "=a" (endlow), "=d" (endhigh)
                        : "r" (endlow), "0" (0), "1" (CALIBRATE_TIME));

                return endlow;
        }

        /*
         * The CTC wasn't reliable: we got a hit on the very first read,
         * or the CPU was so fast/slow that the quotient wouldn't fit in
         * 32 bits..
         */
bad_ctc:
        return 0;
}
#ifdef CONFIG_HPET_TIMER
/* ------ Calibrate the TSC using HPET -------
 * Return 2^32 * (1 / (TSC clocks per usec)) for getting the CPU freq.
 * Second output is parameter 1 (when non NULL)
 * Set 2^32 * (1 / (tsc per HPET clk)) for delay_hpet().
 * calibrate_tsc() calibrates the processor TSC by comparing
 * it to the HPET timer of known frequency.
 * Too much 64-bit arithmetic here to do this cleanly in C
 */
#define CALIBRATE_CNT_HPET      (5 * hpet_tick)
#define CALIBRATE_TIME_HPET     (5 * KERNEL_TICK_USEC)
unsigned long __devinit calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr)
{
        unsigned long tsc_startlow, tsc_starthigh;
        unsigned long tsc_endlow, tsc_endhigh;
        unsigned long hpet_start, hpet_end;
        unsigned long result, remain;

        hpet_start = hpet_readl(HPET_COUNTER);
        rdtsc(tsc_startlow, tsc_starthigh);
        do {
                hpet_end = hpet_readl(HPET_COUNTER);
        } while ((hpet_end - hpet_start) < CALIBRATE_CNT_HPET);
        rdtsc(tsc_endlow, tsc_endhigh);

        /* 64-bit subtract - gcc just messes up with long longs */
        __asm__("subl %2,%0\n\t"
                "sbbl %3,%1"
                : "=a" (tsc_endlow), "=d" (tsc_endhigh)
                : "g" (tsc_startlow), "g" (tsc_starthigh),
                  "0" (tsc_endlow), "1" (tsc_endhigh));

        /* Error: ECPUTOOFAST */
        if (tsc_endhigh)
                goto bad_calibration;

        /* Error: ECPUTOOSLOW */
        if (tsc_endlow <= CALIBRATE_TIME_HPET)
                goto bad_calibration;

        ASM_DIV64_REG(result, remain, tsc_endlow, 0, CALIBRATE_TIME_HPET);
        if (remain > (tsc_endlow >> 1))
                result++; /* rounding the result */

        if (tsc_hpet_quotient_ptr) {
                unsigned long tsc_hpet_quotient;

                ASM_DIV64_REG(tsc_hpet_quotient, remain, tsc_endlow, 0,
                              CALIBRATE_CNT_HPET);
                if (remain > (tsc_endlow >> 1))
                        tsc_hpet_quotient++; /* rounding the result */
                *tsc_hpet_quotient_ptr = tsc_hpet_quotient;
        }

        return result;

bad_calibration:
        /*
         * the CPU was so fast/slow that the quotient wouldn't fit in
         * 32 bits..
         */
        return 0;
}
#endif
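
The ASM_DIV64_REG() calls above exist because dividing a 64-bit value held in
edx:eax by a 32-bit value cannot be written directly in C on i386 without
pulling in libgcc helpers. A hedged sketch of what the first call computes,
assuming the macro divides edx:eax (eax = 0, edx = CALIBRATE_TIME_HPET) by
tsc_endlow, mirroring the open-coded divl in calibrate_tsc() above:

/*
 * Portable-C sketch, not part of this patch:
 * quotient = (usecs * 2^32) / elapsed TSC cycles
 *          = 2^32 / (TSC clocks per usec), rounded to nearest.
 */
static unsigned long calibrate_div_sketch(unsigned long tsc_delta,
                                          unsigned long usecs)
{
        unsigned long long dividend = (unsigned long long)usecs << 32;
        unsigned long quotient = dividend / tsc_delta; /* needs __udivdi3 on i386 */
        unsigned long remain = dividend % tsc_delta;

        if (remain > (tsc_delta >> 1))
                quotient++;     /* round to nearest, as the code above does */
        return quotient;
}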
unsigned long read_timer_tsc(void)
{
        unsigned long retval;
        rdtscl(retval);
        return retval;
}
/* calculate cpu_khz */
void init_cpu_khz(void)
{
        if (cpu_has_tsc) {
                unsigned long tsc_quotient = calibrate_tsc();
                if (tsc_quotient) {
                        /* report CPU clock rate in Hz.
                         * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
                         * clock/second. Our precision is about 100 ppm.
                         */
                        {
                                unsigned long eax = 0, edx = 1000;
                                __asm__("divl %2"
                                        : "=a" (cpu_khz), "=d" (edx)
                                        : "r" (tsc_quotient),
                                          "0" (eax), "1" (edx));
                                printk("Detected %u.%03u MHz processor.\n",
                                        cpu_khz / 1000, cpu_khz % 1000);
                        }
                }
        }
}
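
The divl here computes cpu_khz = (1000 * 2^32) / tsc_quotient; since
calibrate_tsc() returns 2^32 / (TSC clocks per usec), that works out to clocks
per millisecond, i.e. kHz. A worked sketch (not part of this patch), assuming
a 2 GHz part:

unsigned long tsc_quotient = 2147483;   /* ~ 2^32 / 2000 clocks-per-usec */
unsigned long khz = (unsigned long)((1000ULL << 32) / tsc_quotient);
/* khz ~= 2000000, printed as "Detected 2000.000 MHz processor." */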
arch/i386/kernel/timers/timer.c (deleted)

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/timer.h>
#ifdef CONFIG_HPET_TIMER
/*
 * HPET memory read is slower than tsc reads, but is more dependable as it
 * always runs at constant frequency and reduces complexity due to
 * cpufreq. So, we prefer HPET timer to tsc based one. Also, we cannot use
 * timer_pit when HPET is active. So, we default to timer_tsc.
 */
#endif
/* list of timers, ordered by preference, NULL terminated */
static struct init_timer_opts* __initdata timers[] = {
#ifdef CONFIG_X86_CYCLONE_TIMER
        &timer_cyclone_init,
#endif
#ifdef CONFIG_HPET_TIMER
        &timer_hpet_init,
#endif
#ifdef CONFIG_X86_PM_TIMER
        &timer_pmtmr_init,
#endif
        &timer_tsc_init,
        &timer_pit_init,
        NULL,
};
static char clock_override[10] __initdata;

static int __init clock_setup(char* str)
{
        if (str)
                strlcpy(clock_override, str, sizeof(clock_override));
        return 1;
}
__setup("clock=", clock_setup);
/* The chosen timesource has been found to be bad.
 * Fall back to a known good timesource (the PIT)
 */
void clock_fallback(void)
{
        cur_timer = &timer_pit;
}
/* iterates through the list of timers, returning the first
 * one that initializes successfully.
 */
struct timer_opts* __init select_timer(void)
{
        int i = 0;

        /* find most preferred working timer */
        while (timers[i]) {
                if (timers[i]->init)
                        if (timers[i]->init(clock_override) == 0)
                                return timers[i]->opts;
                ++i;
        }

        panic("select_timer: Cannot find a suitable timer\n");
        return NULL;
}
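
For reference, a hypothetical minimal provider that this loop would accept;
this is not code from the tree, merely a sketch modeled on timer_cyclone_init
below and the timer_none.o object listed in the Makefile above:

/* Hypothetical sketch only: the smallest init_timer_opts provider. */
static int __init init_noop(char *override)
{
        /* refuse to probe if the user asked for a different source */
        if (override[0] && strncmp(override, "noop", 4))
                return -ENODEV;
        return 0;       /* "initialization" always succeeds */
}

static struct timer_opts timer_noop = {
        .name = "noop",
        /* real sources also fill in mark_offset/get_offset/
         * monotonic_clock/delay; omitted here for brevity */
};

struct init_timer_opts __initdata timer_noop_init = {
        .init = init_noop,
        .opts = &timer_noop,
};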
int read_current_timer(unsigned long *timer_val)
{
        if (cur_timer->read_timer) {
                *timer_val = cur_timer->read_timer();
                return 0;
        }
        return -1;
}
arch/i386/kernel/timers/timer_cyclone.c (deleted)

/* Cyclone-timer:
 * This code implements timer_ops for the cyclone counter found
 * on IBM x440, x360, and other Summit based systems.
 *
 * Copyright (C) 2002 IBM, John Stultz (johnstul@us.ibm.com)
 */
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <asm/timer.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/i8253.h>
#include "io_ports.h"
/* Number of usecs that the last interrupt was delayed */
static int delay_at_last_interrupt;
#define CYCLONE_CBAR_ADDR 0xFEB00CD0
#define CYCLONE_PMCC_OFFSET 0x51A0
#define CYCLONE_MPMC_OFFSET 0x51D0
#define CYCLONE_MPCS_OFFSET 0x51A8
#define CYCLONE_TIMER_FREQ 100000000
#define CYCLONE_TIMER_MASK (((u64)1<<40)-1) /* 40 bit mask */
int use_cyclone = 0;

static u32* volatile cyclone_timer;     /* Cyclone MPMC0 register */
static u32 last_cyclone_low;
static u32 last_cyclone_high;
static unsigned long long monotonic_base;
static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;

/* helper macro to atomically read both cyclone counter registers */
#define read_cyclone_counter(low,high) \
        do { \
                high = cyclone_timer[1]; low = cyclone_timer[0]; \
        } while (high != cyclone_timer[1]);
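
The high/low/high pattern above is the standard lock-free way to get a
tear-free 64-bit sample out of two 32-bit MMIO reads. A standalone sketch of
the same technique (not part of this patch):

static inline unsigned long long read_cyclone64_sketch(void)
{
        u32 high, low;

        /* retry if the low word wrapped into the high word
         * between the two reads of the high word */
        do {
                high = cyclone_timer[1];
                low = cyclone_timer[0];
        } while (high != cyclone_timer[1]);

        return ((unsigned long long)high << 32) | low;
}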
static void mark_offset_cyclone(void)
{
        unsigned long lost, delay;
        unsigned long delta = last_cyclone_low;
        int count;
        unsigned long long this_offset, last_offset;

        write_seqlock(&monotonic_lock);
        last_offset = ((unsigned long long)last_cyclone_high<<32)|last_cyclone_low;

        spin_lock(&i8253_lock);
        read_cyclone_counter(last_cyclone_low,last_cyclone_high);

        /* read values for delay_at_last_interrupt */
        outb_p(0x00, 0x43);     /* latch the count ASAP */
        count = inb_p(0x40);    /* read the latched count */
        count |= inb(0x40) << 8;

        /*
         * VIA686a test code... reset the latch if count > max + 1
         * from timer_pit.c - cjb
         */
        if (count > LATCH) {
                outb_p(0x34, PIT_MODE);
                outb_p(LATCH & 0xff, PIT_CH0);
                outb(LATCH >> 8, PIT_CH0);
                count = LATCH - 1;
        }
        spin_unlock(&i8253_lock);

        /* lost tick compensation */
        delta = last_cyclone_low - delta;
        delta /= (CYCLONE_TIMER_FREQ/1000000);
        delta += delay_at_last_interrupt;
        lost = delta/(1000000/HZ);
        delay = delta%(1000000/HZ);
        if (lost >= 2)
                jiffies_64 += lost-1;

        /* update the monotonic base value */
        this_offset = ((unsigned long long)last_cyclone_high<<32)|last_cyclone_low;
        monotonic_base += (this_offset - last_offset) & CYCLONE_TIMER_MASK;
        write_sequnlock(&monotonic_lock);

        /* calculate delay_at_last_interrupt */
        count = ((LATCH-1) - count) * TICK_SIZE;
        delay_at_last_interrupt = (count + LATCH/2) / LATCH;

        /* catch corner case where tick rollover occurred
         * between cyclone and pit reads (as noted when
         * usec delta is > 90% # of usecs/tick)
         */
        if (lost && abs(delay - delay_at_last_interrupt) > (900000/HZ))
                jiffies_64++;
}
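
As a worked example of the lost-tick math, with HZ=1000 a 2500 usec delta
gives lost = 2 and delay = 500: one whole tick went unaccounted for, so
jiffies_64 is advanced by lost - 1 = 1, the remaining tick presumably being
credited by the regular timer-interrupt path itself.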
static unsigned long get_offset_cyclone(void)
{
        u32 offset;

        if (!cyclone_timer)
                return delay_at_last_interrupt;

        /* Read the cyclone timer */
        offset = cyclone_timer[0];

        /* .. relative to previous jiffy */
        offset = offset - last_cyclone_low;

        /* convert cyclone ticks to microseconds */
        /* XXX slow, can we speed this up? */
        offset = offset/(CYCLONE_TIMER_FREQ/1000000);

        /* our adjusted time offset in microseconds */
        return delay_at_last_interrupt + offset;
}
static unsigned long long monotonic_clock_cyclone(void)
{
        u32 now_low, now_high;
        unsigned long long last_offset, this_offset, base;
        unsigned long long ret;
        unsigned seq;

        /* atomically read monotonic base & last_offset */
        do {
                seq = read_seqbegin(&monotonic_lock);
                last_offset = ((unsigned long long)last_cyclone_high<<32)|last_cyclone_low;
                base = monotonic_base;
        } while (read_seqretry(&monotonic_lock, seq));

        /* Read the cyclone counter */
        read_cyclone_counter(now_low,now_high);
        this_offset = ((unsigned long long)now_high<<32)|now_low;

        /* convert to nanoseconds */
        ret = base + ((this_offset - last_offset)&CYCLONE_TIMER_MASK);
        return ret * (1000000000 / CYCLONE_TIMER_FREQ);
}
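
Note the seqlock discipline here: mark_offset_cyclone() updates
monotonic_base and the last_cyclone_* pair under write_seqlock(), so this
reader simply retries until it observes a consistent (base, last_offset)
snapshot. Readers never block, which keeps this call cheap on the
timekeeping fast path.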
static int __init init_cyclone(char* override)
{
        u32* reg;
        u32 base;               /* saved cyclone base address */
        u32 pageaddr;           /* page that contains cyclone_timer register */
        u32 offset;             /* offset from pageaddr to cyclone_timer register */
        int i;

        /* check clock override */
        if (override[0] && strncmp(override,"cyclone",7))
                return -ENODEV;

        /* make sure we're on a summit box */
        if (!use_cyclone)
                return -ENODEV;

        printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n");

        /* find base address */
        pageaddr = (CYCLONE_CBAR_ADDR)&PAGE_MASK;
        offset = (CYCLONE_CBAR_ADDR)&(~PAGE_MASK);
        set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
        reg = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
        if (!reg) {
                printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n");
                return -ENODEV;
        }
        base = *reg;
        if (!base) {
                printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n");
                return -ENODEV;
        }

        /* setup PMCC */
        pageaddr = (base + CYCLONE_PMCC_OFFSET)&PAGE_MASK;
        offset = (base + CYCLONE_PMCC_OFFSET)&(~PAGE_MASK);
        set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
        reg = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
        if (!reg) {
                printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n");
                return -ENODEV;
        }
        reg[0] = 0x00000001;

        /* setup MPCS */
        pageaddr = (base + CYCLONE_MPCS_OFFSET)&PAGE_MASK;
        offset = (base + CYCLONE_MPCS_OFFSET)&(~PAGE_MASK);
        set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
        reg = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
        if (!reg) {
                printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n");
                return -ENODEV;
        }
        reg[0] = 0x00000001;

        /* map in cyclone_timer */
        pageaddr = (base + CYCLONE_MPMC_OFFSET)&PAGE_MASK;
        offset = (base + CYCLONE_MPMC_OFFSET)&(~PAGE_MASK);
        set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
        cyclone_timer = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
        if (!cyclone_timer) {
                printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n");
                return -ENODEV;
        }

        /* quick test to make sure it's ticking */
        for (i = 0; i < 3; i++) {
                u32 old = cyclone_timer[0];
                int stall = 100;
                while (stall--)
                        barrier();
                if (cyclone_timer[0] == old) {
                        printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n");
                        cyclone_timer = 0;
                        return -ENODEV;
                }
        }

        init_cpu_khz();

        /* Everything looks good! */
        return 0;
}
static void delay_cyclone(unsigned long loops)
{
        unsigned long bclock, now;

        if (!cyclone_timer)
                return;

        bclock = cyclone_timer[0];
        do {
                rep_nop();
                now = cyclone_timer[0];
        } while ((now-bclock) < loops);
}
/************************************************************/

/* cyclone timer_opts struct */
static struct timer_opts timer_cyclone = {
        .name            = "cyclone",
        .mark_offset     = mark_offset_cyclone,
        .get_offset      = get_offset_cyclone,
        .monotonic_clock = monotonic_clock_cyclone,
        .delay           = delay_cyclone,
};

struct init_timer_opts __initdata timer_cyclone_init = {
        .init = init_cyclone,
        .opts = &timer_cyclone,
};
arch/i386/kernel/timers/timer_hpet.c (deleted)

/*
 * This code largely moved from arch/i386/kernel/time.c.
 * See comments there for proper credits.
 */
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <asm/timer.h>
#include <asm/io.h>
#include <asm/processor.h>
#include "io_ports.h"
#include "mach_timer.h"
#include <asm/hpet.h>
static unsigned long hpet_usec_quotient __read_mostly;  /* convert hpet clks to usec */
static unsigned long tsc_hpet_quotient __read_mostly;   /* convert tsc to hpet clks */
static unsigned long hpet_last;                         /* hpet counter value at last tick */
static unsigned long last_tsc_low;                      /* lsb 32 bits of Time Stamp Counter */
static unsigned long last_tsc_high;                     /* msb 32 bits of Time Stamp Counter */
static unsigned long long monotonic_base;
static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
/* convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *      ns = cycles / (freq / ns_per_sec)
 *      ns = cycles * (ns_per_sec / freq)
 *      ns = cycles * (10^9 / (cpu_khz * 10^3))
 *      ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *      ns = cycles * (10^6 * SC / cpu_khz) / SC
 *      ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift.
 *
 * We can use a khz divisor instead of mhz to keep better precision, since
 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
static unsigned long cyc2ns_scale __read_mostly;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
        cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
        return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
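
Plugging numbers into the scaling math, assuming cpu_khz = 2000000 (a 2 GHz
CPU); this sketch is illustrative only, not part of this patch:

/* cyc2ns_scale = (1000000 << 10) / 2000000 = 512 */
set_cyc2ns_scale(2000000);
/* cycles_2_ns(4000) = (4000 * 512) >> 10 = 2000 ns,
 * i.e. 4000 cycles at 2 GHz = 2 usec, as expected. */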
static unsigned long long monotonic_clock_hpet(void)
{
        unsigned long long last_offset, this_offset, base;
        unsigned seq;

        /* atomically read monotonic base & last_offset */
        do {
                seq = read_seqbegin(&monotonic_lock);
                last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
                base = monotonic_base;
        } while (read_seqretry(&monotonic_lock, seq));

        /* Read the Time Stamp Counter */
        rdtscll(this_offset);

        /* return the value in ns */
        return base + cycles_2_ns(this_offset - last_offset);
}
static unsigned long get_offset_hpet(void)
{
        register unsigned long eax, edx;

        eax = hpet_readl(HPET_COUNTER);
        eax -= hpet_last;       /* hpet delta */
        eax = min(hpet_tick, eax);
        /*
         * Time offset = (hpet delta) * ( usecs per HPET clock )
         *             = (hpet delta) * ( usecs per tick / HPET clocks per tick)
         *             = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
         *
         * Where,
         * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
         *
         * Using a mull instead of a divl saves some cycles in critical path.
         */
        ASM_MUL64_REG(eax, edx, hpet_usec_quotient, eax);

        /* our adjusted time offset in microseconds */
        return edx;