Commit efa7e867 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Prevent people from directly including <asm/rwsem.h>.
  [IA64] remove time interpolator
  [IA64] Convert to generic timekeeping/clocksource
  [IA64] refresh some config files for 64K pagesize
  [IA64] Delete iosapic_free_rte()
  [IA64] fallocate system call
  [IA64] Enable percpu vector domain for IA64_DIG
  [IA64] Enable percpu vector domain for IA64_GENERIC
  [IA64] Support irq migration across domain
  [IA64] Add support for vector domain
  [IA64] Add mapping table between irq and vector
  [IA64] Check if irq is sharable
  [IA64] Fix invalid irq vector assumption for iosapic
  [IA64] Use dynamic irq for iosapic interrupts
  [IA64] Use per iosapic lock for indirect iosapic register access
  [IA64] Cleanup lock order in iosapic_register_intr
  [IA64] Remove duplicated members in iosapic_rte_info
  [IA64] Remove block structure for locking in iosapic.c
parents 02d6112c bd807f9c
@@ -1154,6 +1154,8 @@ and is between 256 and 4096 characters. It is defined in the file
nointroute [IA-64]
nojitter [IA64] Disables jitter checking for ITC timers.
nolapic [IA-32,APIC] Do not enable or use the local APIC.
nolapic_timer [IA-32,APIC] Do not use the local APIC timer.
@@ -1885,6 +1887,9 @@ and is between 256 and 4096 characters. It is defined in the file
vdso=1: enable VDSO (default)
vdso=0: disable VDSO mapping
vector= [IA-64,SMP]
vector=percpu: enable percpu vector domain
video= [FB] Frame buffer configuration
See Documentation/fb/modedb.txt.
Time Interpolators
------------------
Time interpolators provide a basis for time calculation between timer ticks and
allow an accurate determination of time down to the accuracy of the time
source in nanoseconds.
The architecture-specific code typically provides gettimeofday and
settimeofday under Linux. The time interpolator provides both if an arch
defines CONFIG_TIME_INTERPOLATION. The arch still must set up timer tick
operations and call the necessary functions to advance the clock.
With the time interpolator a standardized interface exists for time
interpolation between ticks. The provided logic is highly scalable
and has been tested in SMP situations of up to 512 CPUs.
If CONFIG_TIME_INTERPOLATION is defined then the architecture-specific code
(or device drivers, such as the HPET driver) may register time interpolators.
These are typically defined in the following way:
static struct time_interpolator my_interpolator = {
	.frequency = MY_FREQUENCY,
	.source = TIME_SOURCE_MMIO32,
	.shift = 8,	/* scaling for higher accuracy */
	.drift = -1,	/* unknown drift */
	.jitter = 0	/* time source is stable */
};
void time_init(void)
{
	....
	/* Initialization of the timer */
	my_interpolator.addr = &my_timer;
	register_time_interpolator(&my_interpolator);
	....
}
For more details see include/linux/timex.h and kernel/timer.c.
Christoph Lameter <christoph@lameter.com>, October 31, 2004
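The merge below removes this interface in favour of the generic
timekeeping/clocksource code. For contrast, a minimal sketch of a
clocksource registration, following the pattern of the ITC and Cyclone
conversions in this merge (my_read_counter, MY_FREQUENCY and the mask
width are placeholders, not real kernel symbols):

#include <linux/clocksource.h>

/* my_read_counter() stands in for a real platform counter read. */
static cycle_t my_read(void)
{
	return my_read_counter();
}

static struct clocksource my_clocksource = {
	.name	= "myclk",
	.rating	= 300,
	.read	= my_read,
	.mask	= (1LL << 40) - 1,	/* valid counter bits */
	.shift	= 16,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

void __init my_clock_init(void)
{
	my_clocksource.mult = clocksource_hz2mult(MY_FREQUENCY,
						  my_clocksource.shift);
	clocksource_register(&my_clocksource);
}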
@@ -62,7 +62,11 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config TIME_INTERPOLATION
config GENERIC_TIME
bool
default y
config GENERIC_TIME_VSYSCALL
bool
default y
@@ -85,7 +85,7 @@ CONFIG_MMU=y
CONFIG_SWIOTLB=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_TIME_INTERPOLATION=y
CONFIG_GENERIC_TIME=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
@@ -86,7 +86,7 @@ CONFIG_MMU=y
CONFIG_SWIOTLB=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_TIME_INTERPOLATION=y
CONFIG_GENERIC_TIME=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
@@ -86,7 +86,7 @@ CONFIG_MMU=y
CONFIG_SWIOTLB=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_TIME_INTERPOLATION=y
CONFIG_GENERIC_TIME=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
@@ -93,7 +93,7 @@ CONFIG_SWIOTLB=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_TIME_INTERPOLATION=y
CONFIG_GENERIC_TIME=y
CONFIG_DMI=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y
@@ -96,7 +96,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_TIME_INTERPOLATION=y
CONFIG_GENERIC_TIME=y
CONFIG_DMI=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y
@@ -7,6 +7,7 @@
#define ASM_OFFSETS_C 1
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <asm-ia64/processor.h>
#include <asm-ia64/ptrace.h>
@@ -15,6 +16,7 @@
#include <asm-ia64/mca.h>
#include "../kernel/sigframe.h"
#include "../kernel/fsyscall_gtod_data.h"
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -256,17 +258,24 @@ void foo(void)
BLANK();
/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source));
DEFINE(IA64_TIME_INTERPOLATOR_SHIFT_OFFSET, offsetof (struct time_interpolator, shift));
DEFINE(IA64_TIME_INTERPOLATOR_NSEC_OFFSET, offsetof (struct time_interpolator, nsec_per_cyc));
DEFINE(IA64_TIME_INTERPOLATOR_OFFSET_OFFSET, offsetof (struct time_interpolator, offset));
DEFINE(IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET, offsetof (struct time_interpolator, last_cycle));
DEFINE(IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET, offsetof (struct time_interpolator, last_counter));
DEFINE(IA64_TIME_INTERPOLATOR_JITTER_OFFSET, offsetof (struct time_interpolator, jitter));
DEFINE(IA64_TIME_INTERPOLATOR_MASK_OFFSET, offsetof (struct time_interpolator, mask));
DEFINE(IA64_TIME_SOURCE_CPU, TIME_SOURCE_CPU);
DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
DEFINE(IA64_GTOD_LOCK_OFFSET,
offsetof (struct fsyscall_gtod_data_t, lock));
DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
offsetof (struct fsyscall_gtod_data_t, wall_time));
DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
offsetof (struct fsyscall_gtod_data_t, monotonic_time));
DEFINE(IA64_CLKSRC_MASK_OFFSET,
offsetof (struct fsyscall_gtod_data_t, clk_mask));
DEFINE(IA64_CLKSRC_MULT_OFFSET,
offsetof (struct fsyscall_gtod_data_t, clk_mult));
DEFINE(IA64_CLKSRC_SHIFT_OFFSET,
offsetof (struct fsyscall_gtod_data_t, clk_shift));
DEFINE(IA64_CLKSRC_MMIO_OFFSET,
offsetof (struct fsyscall_gtod_data_t, clk_fsys_mmio));
DEFINE(IA64_CLKSRC_CYCLE_LAST_OFFSET,
offsetof (struct fsyscall_gtod_data_t, clk_cycle_last));
DEFINE(IA64_ITC_JITTER_OFFSET,
offsetof (struct itc_jitter_data_t, itc_jitter));
DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
offsetof (struct itc_jitter_data_t, itc_lastcycle));
}
@@ -3,6 +3,7 @@
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <asm/io.h>
/* IBM Summit (EXA) Cyclone counter code */
@@ -18,13 +19,21 @@ void __init cyclone_setup(void)
use_cyclone = 1;
}
static void __iomem *cyclone_mc;
struct time_interpolator cyclone_interpolator = {
.source = TIME_SOURCE_MMIO64,
.shift = 16,
.frequency = CYCLONE_TIMER_FREQ,
.drift = -100,
.mask = (1LL << 40) - 1
static cycle_t read_cyclone(void)
{
return (cycle_t)readq((void __iomem *)cyclone_mc);
}
static struct clocksource clocksource_cyclone = {
.name = "cyclone",
.rating = 300,
.read = read_cyclone,
.mask = (1LL << 40) - 1,
.mult = 0, /* to be calculated */
.shift = 16,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
int __init init_cyclone_clock(void)
@@ -44,13 +53,15 @@ int __init init_cyclone_clock(void)
offset = (CYCLONE_CBAR_ADDR);
reg = (u64*)ioremap_nocache(offset, sizeof(u64));
if(!reg){
printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n");
printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
" register.\n");
use_cyclone = 0;
return -ENODEV;
}
base = readq(reg);
if(!base){
printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n");
printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
" value.\n");
use_cyclone = 0;
return -ENODEV;
}
@@ -60,7 +71,8 @@ int __init init_cyclone_clock(void)
offset = (base + CYCLONE_PMCC_OFFSET);
reg = (u64*)ioremap_nocache(offset, sizeof(u64));
if(!reg){
printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n");
printk(KERN_ERR "Summit chipset: Could not find valid PMCC"
" register.\n");
use_cyclone = 0;
return -ENODEV;
}
@@ -71,7 +83,8 @@ int __init init_cyclone_clock(void)
offset = (base + CYCLONE_MPCS_OFFSET);
reg = (u64*)ioremap_nocache(offset, sizeof(u64));
if(!reg){
printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n");
printk(KERN_ERR "Summit chipset: Could not find valid MPCS"
" register.\n");
use_cyclone = 0;
return -ENODEV;
}
@@ -82,7 +95,8 @@ int __init init_cyclone_clock(void)
offset = (base + CYCLONE_MPMC_OFFSET);
cyclone_timer = (u32*)ioremap_nocache(offset, sizeof(u32));
if(!cyclone_timer){
printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n");
printk(KERN_ERR "Summit chipset: Could not find valid MPMC"
" register.\n");
use_cyclone = 0;
return -ENODEV;
}
@@ -93,7 +107,8 @@ int __init init_cyclone_clock(void)
int stall = 100;
while(stall--) barrier();
if(readl(cyclone_timer) == old){
printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n");
printk(KERN_ERR "Summit chipset: Counter not counting!"
" DISABLED\n");
iounmap(cyclone_timer);
cyclone_timer = 0;
use_cyclone = 0;
@@ -101,8 +116,11 @@ int __init init_cyclone_clock(void)
}
}
/* initialize last tick */
cyclone_interpolator.addr = cyclone_timer;
register_time_interpolator(&cyclone_interpolator);
cyclone_mc = cyclone_timer;
clocksource_cyclone.fsys_mmio = cyclone_timer;
clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ,
clocksource_cyclone.shift);
clocksource_register(&clocksource_cyclone);
return 0;
}
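clocksource_hz2mult() picks mult so that a counter delta converts to
nanoseconds as (cycles * mult) >> shift. A self-contained sketch of that
computation (hz2mult_sketch is an illustrative name; the real helper
lives in include/linux/clocksource.h):

#include <stdint.h>

/* For a counter ticking at hz: mult = (10^9 << shift) / hz, rounded,
 * so that ns = (cycles * mult) >> shift. */
static uint32_t hz2mult_sketch(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = ((uint64_t)1000000000 << shift) + hz / 2;

	return (uint32_t)(tmp / hz);
}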
@@ -1581,7 +1581,7 @@ sys_call_table:
data8 sys_sync_file_range // 1300
data8 sys_tee
data8 sys_vmsplice
data8 sys_ni_syscall // reserved for move_pages
data8 sys_fallocate
data8 sys_getcpu
data8 sys_epoll_pwait // 1305
data8 sys_utimensat
/*
* (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
* Contributed by Peter Keilty <peter.keilty@hp.com>
*
* fsyscall gettimeofday data
*/
struct fsyscall_gtod_data_t {
seqlock_t lock;
struct timespec wall_time;
struct timespec monotonic_time;
cycle_t clk_mask;
u32 clk_mult;
u32 clk_shift;
void *clk_fsys_mmio;
cycle_t clk_cycle_last;
} __attribute__ ((aligned (L1_CACHE_BYTES)));
struct itc_jitter_data_t {
int itc_jitter;
cycle_t itc_lastcycle;
} __attribute__ ((aligned (L1_CACHE_BYTES)));
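The seqlock allows the fsyscall gettimeofday fast path to read this data
without taking a lock. A C sketch of the reader side for illustration
(the real consumer is the assembly fast path in arch/ia64/kernel/fsys.S,
so this function is an assumption, not kernel code):

static struct timespec fsys_read_wall_time(void)
{
	struct timespec ts;
	unsigned long seq;

	do {
		seq = read_seqbegin(&fsyscall_gtod_data.lock);
		ts = fsyscall_gtod_data.wall_time;
	} while (read_seqretry(&fsyscall_gtod_data.lock, seq));
	return ts;
}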
@@ -35,7 +35,7 @@ void ack_bad_irq(unsigned int irq)
#ifdef CONFIG_IA64_GENERIC
unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
return (unsigned int) vec;
return __get_cpu_var(vector_irq)[vec];
}
#endif
@@ -46,6 +46,12 @@
#define IRQ_DEBUG 0
#define IRQ_VECTOR_UNASSIGNED (0)
#define IRQ_UNUSED (0)
#define IRQ_USED (1)
#define IRQ_RSVD (2)
/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
@@ -54,6 +60,8 @@ int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
void __iomem *ipi_base_addr = ((void __iomem *)
(__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
static cpumask_t vector_allocation_domain(int cpu);
/*
* Legacy IRQ to IA-64 vector translation table.
*/
@@ -64,46 +72,269 @@ __u8 isa_irq_to_vector_map[16] = {
};
EXPORT_SYMBOL(isa_irq_to_vector_map);
static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];
DEFINE_SPINLOCK(vector_lock);
struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
[0 ... NR_IRQS - 1] = {
.vector = IRQ_VECTOR_UNASSIGNED,
.domain = CPU_MASK_NONE
}
};
DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
};
static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
[0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
};
static int irq_status[NR_IRQS] = {
[0 ... NR_IRQS -1] = IRQ_UNUSED
};
int check_irq_used(int irq)
{
if (irq_status[irq] == IRQ_USED)
return 1;
return -1;
}
static void reserve_irq(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&vector_lock, flags);
irq_status[irq] = IRQ_RSVD;
spin_unlock_irqrestore(&vector_lock, flags);
}
static inline int find_unassigned_irq(void)
{
int irq;
for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
if (irq_status[irq] == IRQ_UNUSED)
return irq;
return -ENOSPC;
}
static inline int find_unassigned_vector(cpumask_t domain)
{
cpumask_t mask;
int pos;
cpus_and(mask, domain, cpu_online_map);
if (cpus_empty(mask))
return -EINVAL;
for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
cpus_and(mask, domain, vector_table[pos]);
if (!cpus_empty(mask))
continue;
return IA64_FIRST_DEVICE_VECTOR + pos;
}
return -ENOSPC;
}
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
cpumask_t mask;
int cpu, pos;
struct irq_cfg *cfg = &irq_cfg[irq];
cpus_and(mask, domain, cpu_online_map);
if (cpus_empty(mask))
return -EINVAL;
if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
return 0;
if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
return -EBUSY;
for_each_cpu_mask(cpu, mask)
per_cpu(vector_irq, cpu)[vector] = irq;
cfg->vector = vector;
cfg->domain = domain;
irq_status[irq] = IRQ_USED;
pos = vector - IA64_FIRST_DEVICE_VECTOR;
cpus_or(vector_table[pos], vector_table[pos], domain);
return 0;
}
int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&vector_lock, flags);
ret = __bind_irq_vector(irq, vector, domain);
spin_unlock_irqrestore(&vector_lock, flags);
return ret;
}
static void __clear_irq_vector(int irq)
{
int vector, cpu, pos;
cpumask_t mask;
cpumask_t domain;
struct irq_cfg *cfg = &irq_cfg[irq];
BUG_ON((unsigned)irq >= NR_IRQS);
BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
vector = cfg->vector;
domain = cfg->domain;
cpus_and(mask, cfg->domain, cpu_online_map);
for_each_cpu_mask(cpu, mask)
per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
cfg->vector = IRQ_VECTOR_UNASSIGNED;
cfg->domain = CPU_MASK_NONE;
irq_status[irq] = IRQ_UNUSED;
pos = vector - IA64_FIRST_DEVICE_VECTOR;
cpus_andnot(vector_table[pos], vector_table[pos], domain);
}
static void clear_irq_vector(int irq)
{
unsigned long flags;
spin_lock_irqsave(&vector_lock, flags);
__clear_irq_vector(irq);
spin_unlock_irqrestore(&vector_lock, flags);
}
int
assign_irq_vector (int irq)
{
int pos, vector;
again:
pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
vector = IA64_FIRST_DEVICE_VECTOR + pos;
if (vector > IA64_LAST_DEVICE_VECTOR)
return -ENOSPC;
if (test_and_set_bit(pos, ia64_vector_mask))
goto again;
unsigned long flags;
int vector, cpu;
cpumask_t domain;
vector = -ENOSPC;
spin_lock_irqsave(&vector_lock, flags);
if (irq < 0) {
goto out;
}
for_each_online_cpu(cpu) {
domain = vector_allocation_domain(cpu);
vector = find_unassigned_vector(domain);
if (vector >= 0)
break;
}
if (vector < 0)
goto out;
BUG_ON(__bind_irq_vector(irq, vector, domain));
out:
spin_unlock_irqrestore(&vector_lock, flags);
return vector;
}
void
free_irq_vector (int vector)
{
int pos;
if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
if (vector < IA64_FIRST_DEVICE_VECTOR ||
vector > IA64_LAST_DEVICE_VECTOR)
return;
pos = vector - IA64_FIRST_DEVICE_VECTOR;
if (!test_and_clear_bit(pos, ia64_vector_mask))
printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
clear_irq_vector(vector);
}
int
reserve_irq_vector (int vector)
{
int pos;
if (vector < IA64_FIRST_DEVICE_VECTOR ||
vector > IA64_LAST_DEVICE_VECTOR)
return -EINVAL;
return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}
pos = vector - IA64_FIRST_DEVICE_VECTOR;
return test_and_set_bit(pos, ia64_vector_mask);
/*
* Initialize vector_irq on a new cpu. This function must be called
* with vector_lock held.
*/
void __setup_vector_irq(int cpu)
{
int irq, vector;
/* Clear vector_irq */
for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
/* Mark the inuse vectors */
for (irq = 0; irq < NR_IRQS; ++irq) {
if (!cpu_isset(cpu, irq_cfg[irq].domain))
continue;
vector = irq_to_vector(irq);
per_cpu(vector_irq, cpu)[vector] = irq;
}
}
#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
static enum vector_domain_type {
VECTOR_DOMAIN_NONE,
VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;
static cpumask_t vector_allocation_domain(int cpu)
{
if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
return cpumask_of_cpu(cpu);
return CPU_MASK_ALL;
}
static int __init parse_vector_domain(char *arg)
{
if (!arg)
return -EINVAL;
if (!strcmp(arg, "percpu")) {
vector_domain_type = VECTOR_DOMAIN_PERCPU;
no_int_routing = 1;
}
return 1;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
return CPU_MASK_ALL;
}
#endif
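In short: booting with "vector=percpu" makes vector_allocation_domain()
return a single-CPU mask, so each CPU allocates vectors from its own
domain and the same vector number can back different interrupts on
different CPUs; the default CPU_MASK_ALL keeps one global vector space,
matching the old behaviour.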
void destroy_and_reserve_irq(unsigned int irq)
{
dynamic_irq_cleanup(irq);
clear_irq_vector(irq);
reserve_irq(irq);
}
static int __reassign_irq_vector(int irq, int cpu)
{
struct irq_cfg *cfg = &irq_cfg[irq];
int vector;
cpumask_t domain;
if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
return -EINVAL;
if (cpu_isset(cpu, cfg->domain))
return 0;
domain = vector_allocation_domain(cpu);
vector = find_unassigned_vector(domain);
if (vector < 0)
return -ENOSPC;
__clear_irq_vector(irq);
BUG_ON(__bind_irq_vector(irq, vector, domain));
return 0;
}
int reassign_irq_vector(int irq, int cpu)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&vector_lock, flags);
ret = __reassign_irq_vector(irq, cpu);
spin_unlock_irqrestore(&vector_lock, flags);
return ret;
}
/*
@@ -111,18 +342,35 @@ reserve_irq_vector (int vector)
*/
int create_irq(void)
{
int vector = assign_irq_vector(AUTO_ASSIGN);
if (vector >= 0)
dynamic_irq_init(vector);
return vector;
unsigned long flags;
int irq, vector, cpu;
cpumask_t domain;
irq = vector = -ENOSPC;
spin_lock_irqsave(&vector_lock, flags);
for_each_online_cpu(cpu) {
domain = vector_allocation_domain(cpu);
vector = find_unassigned_vector(domain);
if (vector >= 0)
break;
}
if (vector < 0)
goto out;
irq = find_unassigned_irq();
if (irq < 0)
goto out;
BUG_ON(__bind_irq_vector(irq, vector, domain));
out:
spin_unlock_irqrestore(&vector_lock, flags);
if (irq >= 0)
dynamic_irq_init(irq);
return irq;
}
void destroy_irq(unsigned int irq)
{
dynamic_irq_cleanup(irq);
free_irq_vector(irq);
clear_irq_vector(irq);
}
#ifdef CONFIG_SMP
@@ -301,14 +549,13 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
irq_desc_t *desc;
unsigned int irq;
for (irq = 0; irq < NR_IRQS; ++irq)
if (irq_to_vector(irq) == vec) {
desc = irq_desc + irq;
desc->status |= IRQ_PER_CPU;
desc->chip = &irq_type_ia64_lsapic;
if (action)
setup_irq(irq, action);
}
irq = vec;
BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
desc = irq_desc + irq;
desc->status |= IRQ_PER_CPU;
desc->chip = &irq_type_ia64_lsapic;
if (action)
setup_irq(irq, action);
}
void __init
@@ -13,6 +13,7 @@
#define MSI_DATA_VECTOR_SHIFT 0
#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
#define MSI_DATA_VECTOR_MASK 0xffffff00
#define MSI_DATA_DELIVERY_SHIFT 8
#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT)
@@ -50,17 +51,29 @@ static struct irq_chip ia64_msi_chip;
static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
{
struct msi_msg msg;
u32 addr;
u32 addr, data;
int cpu = first_cpu(cpu_mask);
if (!cpu_online(cpu))
return;
if (reassign_irq_vector(irq, cpu))
return;
read_msi_msg(irq, &msg);
addr = msg.address_lo;
addr &= MSI_ADDR_DESTID_MASK;
addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask)));
addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
msg.address_lo = addr;
data = msg.data;
data &= MSI_DATA_VECTOR_MASK;
data |= MSI_DATA_VECTOR(irq_to_vector(irq));
msg.data = data;
write_msi_msg(irq, &msg);
irq_desc[irq].affinity = cpu_mask;
irq_desc[irq].affinity = cpumask_of_cpu(cpu);
}
#endif /* CONFIG_SMP */
......@@ -69,13 +82,15 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
struct msi_msg msg;
unsigned long dest_phys_id;
int irq, vector;
cpumask_t mask;
irq = create_irq();
if (irq < 0)
return irq;
set_irq_msi(irq, desc);
dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
cpus_and(mask, irq_to_domain(irq), cpu_online_map);
dest_phys_id = cpu_physical_id(first_cpu(mask));
vector = irq_to_vector(irq);
msg.address_hi = 0;
@@ -395,9 +395,13 @@ smp_callin (void)
fix_b0_for_bsp();
lock_ipi_calllock();
spin_lock(&vector_lock);
/* Setup the per cpu irq handling data structures */
__setup_vector_irq(cpuid);
cpu_set(cpuid, cpu_online_map);
unlock_ipi_calllock();
per_cpu(cpu_state, cpuid) = CPU_ONLINE;
spin_unlock(&vector_lock);
smp_setup_percpu_timer();
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <asm/machvec.h>
#include <asm/delay.h>
@@ -28,6 +29,16 @@
#include <asm/sections.h>
#include <asm/system.h>
#include "fsyscall_gtod_data.h"
static cycle_t itc_get_cycles(void);
struct fsyscall_gtod_data_t fsyscall_gtod_data = {
.lock = SEQLOCK_UNLOCKED,
};
struct itc_jitter_data_t itc_jitter_data;
volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */
#ifdef CONFIG_IA64_DEBUG_IRQ
@@ -37,11 +48,16 @@ EXPORT_SYMBOL(last_cli_ip);
#endif
static struct time_interpolator itc_interpolator = {
.shift = 16,
.mask = 0xffffffffffffffffLL,
.source = TIME_SOURCE_CPU
static struct clocksource clocksource_itc = {
.name = "itc",
.rating = 350,
.read = itc_get_cycles,
.mask = 0xffffffffffffffff,
.mult = 0, /* to be calculated */
.shift = 16,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *itc_clocksource;
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
@@ -210,8 +226,6 @@ ia64_init_itm (void)
+ itc_freq/2)/itc_freq;
if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
itc_interpolator.frequency = local_cpu_data->itc_freq;
itc_interpolator.drift = itc_drift;
#ifdef CONFIG_SMP
/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
* Jitter compensation requires a cmpxchg which may limit
@@ -223,15 +237,50 @@
* even going backward) if the ITC offsets between the individual CPUs
* are too large.
*/
if (!nojitter) itc_interpolator.jitter = 1;
if (!nojitter)
itc_jitter_data.itc_jitter = 1;
#endif
register_time_interpolator(&itc_interpolator);
}
/* Setup the CPU local timer tick */
ia64_cpu_local_tick();
if (!itc_clocksource) {
/* Sort out mult/shift values: */
clocksource_itc.mult =
clocksource_hz2mult(local_cpu_data->itc_freq,
clocksource_itc.shift);
clocksource_register(&clocksource_itc);
itc_clocksource = &clocksource_itc;
}
}
static cycle_t itc_get_cycles(void)
{
u64 lcycle, now, ret;
if (!itc_jitter_data.itc_jitter)
return get_cycles();
lcycle = itc_jitter_data.itc_lastcycle;
now = get_cycles();
if (lcycle && time_after(lcycle, now))
return lcycle;
/*
* Keep track of the last timer value returned.
* In an SMP environment, you could lose out in contention of
* cmpxchg. If so, your cmpxchg returns new value which the
* winner of contention updated to. Use the new value instead.
*/
ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
if (unlikely(ret != lcycle))
return ret;
return now;
}
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED | IRQF_IRQPOLL,
@@ -307,3 +356,34 @@
if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT))
ia64_printk_clock = ia64_itc_printk_clock;
}
void update_vsyscall(struct timespec *wall, struct clocksource *c)
{
unsigned long flags;
write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
/* copy fsyscall clock data */
fsyscall_gtod_data.clk_mask = c->mask;
fsyscall_gtod_data.clk_mult = c->mult;
fsyscall_gtod_data.clk_shift = c->shift;
fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
/* copy kernel time structures */
fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
+ wall->tv_sec;
fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
+ wall->tv_nsec;
/* normalize */
while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
fsyscall_gtod_data.monotonic_time.tv_sec++;
}
write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <asm/hw_irq.h>
#include <asm/system.h>
@@ -22,11 +23,21 @@
extern unsigned long sn_rtc_cycles_per_second;
static struct time_interpolator sn2_interpolator = {
.drift = -1,
.shift = 10,
.mask = (1LL << 55) - 1,
.source = TIME_SOURCE_MMIO64
static void __iomem *sn2_mc;
static cycle_t read_sn2(void)
{
return (cycle_t)readq(sn2_mc);
}
static struct clocksource clocksource_sn2 = {
.name = "sn2_rtc",
.rating = 300,
.read = read_sn2,
.mask = (1LL << 55) - 1,
.mult = 0,
.shift = 10,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
@@ -47,9 +58,11 @@ ia64_sn_udelay (unsigned long usecs)
void __init sn_timer_init(void)
{
sn2_interpolator.frequency = sn_rtc_cycles_per_second;
sn2_interpolator.addr = RTC_COUNTER_ADDR;
register_time_interpolator(&sn2_interpolator);
sn2_mc = RTC_COUNTER_ADDR;
clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR;
clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
clocksource_sn2.shift);
clocksource_register(&clocksource_sn2);
ia64_udelay = &ia64_sn_udelay;
}
@@ -475,7 +475,7 @@ static void acpi_processor_idle(void)
/* Get end time (ticks) */
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
#ifdef CONFIG_GENERIC_TIME
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
/* TSC halts in C2, so notify users */
mark_tsc_unstable("possible TSC halt in C2");
#endif
@@ -517,7 +517,7 @@ static void acpi_processor_idle(void)
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
}
#ifdef CONFIG_GENERIC_TIME
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
/* TSC halts in C3, so notify users */
mark_tsc_unstable("TSC halts in C3");
#endif
@@ -29,6 +29,7 @@
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/clocksource.h>
#include <asm/current.h>
#include <asm/uaccess.h>
@@ -51,8 +52,34 @@
#define HPET_RANGE_SIZE 1024 /* from HPET spec */
#if BITS_PER_LONG == 64
#define write_counter(V, MC) writeq(V, MC)
#define read_counter(MC) readq(MC)
#else
#define write_counter(V, MC) writel(V, MC)
#define read_counter(MC) readl(MC)
#endif
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
static void __iomem *hpet_mctr;
static cycle_t read_hpet(void)
{
return (cycle_t)read_counter((void __iomem *)hpet_mctr);
}
static struct clocksource clocksource_hpet = {
.name = "hpet",
.rating = 250,
.read = read_hpet,
.mask = 0xffffffffffffffff,
.mult = 0, /* to be calculated */
.shift = 10,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *hpet_clocksource;
/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);
/* A lock for concurrent intermodule access to hpet and isr hpet activity. */
@@ -79,7 +106,7 @@ struct hpets {
struct hpets *hp_next;
struct hpet __iomem *hp_hpet;
unsigned long hp_hpet_phys;
struct time_interpolator *hp_interpolator;
struct clocksource *hp_clocksource;
unsigned long long hp_tick_freq;
unsigned long hp_delta;
unsigned int hp_ntimer;
@@ -94,13 +121,6 @@ static struct hpets *hpets;
#define HPET_PERIODIC 0x0004
#define HPET_SHARED_IRQ 0x0008
#if BITS_PER_LONG == 64
#define write_counter(V, MC) writeq(V, MC)
#define read_counter(MC) readq(MC)
#else
#define write_counter(V, MC) writel(V, MC)
#define read_counter(MC) readl(MC)
#endif
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
@@ -737,27 +757,6 @@ static ctl_table dev_root[] = {
static struct ctl_table_header *sysctl_header;
static void hpet_register_interpolator(struct hpets *hpetp)
{
#ifdef CONFIG_TIME_INTERPOLATION
struct time_interpolator *ti;
ti = kzalloc(sizeof(*ti), GFP_KERNEL);
if (!ti)
return;
ti->source = TIME_SOURCE_MMIO64;
ti->shift = 10;
ti->addr = &hpetp->hp_hpet->hpet_mc;
ti->frequency = hpetp->hp_tick_freq;
ti->drift = HPET_DRIFT;
ti->mask = -1;
hpetp->hp_interpolator = ti;
register_time_interpolator(ti);
#endif
}
/*
* Adjustment for when arming the timer with
* initial conditions. That is, main counter
@@ -909,7 +908,16 @@ int hpet_alloc(struct hpet_data *hdp)
}
hpetp->hp_delta = hpet_calibrate(hpetp);
hpet_register_interpolator(hpetp);
if (!hpet_clocksource) {
hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr);
clocksource_hpet.mult = clocksource_hz2mult(hpetp->hp_tick_freq,
clocksource_hpet.shift);
clocksource_register(&clocksource_hpet);
hpetp->hp_clocksource = &clocksource_hpet;
hpet_clocksource = &clocksource_hpet;
}
return 0;
}
@@ -995,7 +1003,7 @@ static int hpet_acpi_add(struct acpi_device *device)
static int hpet_acpi_remove(struct acpi_device *device, int type)
{
/* XXX need to unregister interpolator, dealloc mem, etc */
/* XXX need to unregister clocksource, dealloc mem, etc */
return -EINVAL;
}
@@ -90,13 +90,27 @@ enum {
extern __u8 isa_irq_to_vector_map[16];
#define isa_irq_to_vector(x) isa_irq_to_vector_map[(x)]
struct irq_cfg {
ia64_vector vector;
cpumask_t domain;
};
extern spinlock_t vector_lock;
extern struct irq_cfg irq_cfg[NR_IRQS];
#define irq_to_domain(x) irq_cfg[(x)].domain
DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */
extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
extern int assign_irq_vector (int irq); /* allocate a free vector */
extern void free_irq_vector (int vector);
extern int reserve_irq_vector (int vector);
extern void __setup_vector_irq(int cpu);
extern int reassign_irq_vector(int irq, int cpu);
extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
extern int check_irq_used (int irq);
extern void destroy_and_reserve_irq (unsigned int irq);
static inline void ia64_resend_irq(unsigned int vector)
{
@@ -113,7 +127,7 @@ extern irq_desc_t irq_desc[NR_IRQS];
static inline unsigned int
__ia64_local_vector_to_irq (ia64_vector vec)
{
return (unsigned int) vec;
return __get_cpu_var(vector_irq)[vec];
}
#endif
@@ -131,7 +145,7 @@ __ia64_local_vector_to_irq (ia64_vector vec)
static inline ia64_vector
irq_to_vector (int irq)
{
return (ia64_vector) irq;
return irq_cfg[irq].vector;
}
/*
@@ -47,19 +47,21 @@
#define IOSAPIC_MASK_SHIFT 16
#define IOSAPIC_MASK (1<<IOSAPIC_MASK_SHIFT)
#define IOSAPIC_VECTOR_MASK 0xffffff00
#ifndef __ASSEMBLY__
#ifdef CONFIG_IOSAPIC
#define NR_IOSAPICS 256
static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
static inline unsigned int __iosapic_read(char __iomem *iosapic, unsigned int reg)
{
writel(reg, iosapic + IOSAPIC_REG_SELECT);
return readl(iosapic + IOSAPIC_WINDOW);
}
static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
static inline void __iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
writel(reg, iosapic + IOSAPIC_REG_SELECT);
writel(val, iosapic + IOSAPIC_WINDOW);
@@ -14,8 +14,13 @@
#include <linux/types.h>
#include <linux/cpumask.h>
#define NR_IRQS 256
#define NR_IRQ_VECTORS NR_IRQS
#define NR_VECTORS 256
#if (NR_VECTORS + 32 * NR_CPUS) < 1024
#define NR_IRQS (NR_VECTORS + 32 * NR_CPUS)
#else
#define NR_IRQS 1024
#endif
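For example, with NR_CPUS = 4 this gives NR_IRQS = 256 + 32 * 4 = 384,
while a 64-CPU configuration would compute 256 + 2048 = 2304 and is
therefore capped at 1024.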
static __inline__ int
irq_canonicalize (int irq)
@@ -21,6 +21,10 @@
#ifndef _ASM_IA64_RWSEM_H
#define _ASM_IA64_RWSEM_H
#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif
#include <linux/list.h>
#include <linux/spinlock.h>
@@ -292,7 +292,7 @@
#define __NR_sync_file_range 1300
#define __NR_tee 1301
#define __NR_vmsplice 1302
/* 1303 reserved for move_pages */
#define __NR_fallocate 1303
#define __NR_getcpu 1304
#define __NR_epoll_pwait 1305
#define __NR_utimensat 1306
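Syscall 1303, previously reserved for move_pages, is now fallocate. For
reference, a minimal userspace sketch of the call being wired up here
(the file name is hypothetical; on glibc the wrapper needs _GNU_SOURCE):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Preallocate 1 MiB of disk space without writing zeroes. */
	if (fallocate(fd, 0, 0, 1024 * 1024) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}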
@@ -67,6 +67,12 @@ struct clocksource {
unsigned long flags;
cycle_t (*vread)(void);
void (*resume)(void);
#ifdef CONFIG_IA64
void *fsys_mmio; /* used by fsyscall asm code */
#define CLKSRC_FSYS_MMIO_SET(mmio, addr) ((mmio) = (addr))
#else
#define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0)
#endif
/* timekeeping specific data, ignore */
cycle_t cycle_interval;
@@ -224,66 +224,6 @@ static inline int ntp_synced(void)
__x < 0 ? -(-__x >> __s) : __x >> __s; \
})
#ifdef CONFIG_TIME_INTERPOLATION
#define TIME_SOURCE_CPU 0
#define TIME_SOURCE_MMIO64 1
#define TIME_SOURCE_MMIO32 2
#define TIME_SOURCE_FUNCTION 3
/* For proper operations time_interpolator clocks must run slightly slower
* than the standard clock since the interpolator may only correct by having
* time jump forward during a tick. A slower clock is usually a side effect
* of the integer divide of the nanoseconds in a second by the frequency.
* The accuracy of the division can be increased by specifying a shift.
* However, this may cause the clock not to be slow enough.
* The interpolator will self-tune the clock by slowing down if no
* resets occur or speeding up if the time jumps per analysis cycle
* become too high.
*
* Setting jitter compensates for a fluctuating timesource by comparing
* to the last value read from the timesource to ensure that an earlier value
* is not returned by a later call. The price to pay
* for the compensation is that the timer routines are not as scalable anymore.
*/
struct time_interpolator {
u16 source; /* time source flags */
u8 shift; /* increases accuracy of multiply by shifting. */
/* Note that bits may be lost if shift is set too high */
u8 jitter; /* if set compensate for fluctuations */
u32 nsec_per_cyc; /* set by register_time_interpolator() */
void *addr; /* address of counter or function */
cycles_t mask; /* mask the valid bits of the counter */
unsigned long offset; /* nsec offset at last update of interpolator */
u64 last_counter; /* counter value in units of the counter at last update */
cycles_t last_cycle; /* Last timer value if TIME_SOURCE_JITTER is set */
u64 frequency; /* frequency in counts/second */
long drift; /* drift in parts-per-million (or -1) */
unsigned long skips; /* skips forward */
unsigned long ns_skipped; /* nanoseconds skipped */
struct time_interpolator *next;
};
extern void register_time_interpolator(struct time_interpolator *);
extern void unregister_time_interpolator(struct time_interpolator *);
extern void time_interpolator_reset(void);
extern unsigned long time_interpolator_get_offset(void);
extern void time_interpolator_update(long delta_nsec);
#else /* !CONFIG_TIME_INTERPOLATION */
static inline void time_interpolator_reset(void)
{
}
static inline void time_interpolator_update(long delta_nsec)
{
}
#endif /* !CONFIG_TIME_INTERPOLATION */
#define TICK_LENGTH_SHIFT 32
#ifdef CONFIG_NO_HZ
@@ -136,7 +136,6 @@ static inline void warp_clock(void)
write_seqlock_irq(&xtime_lock);
wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
xtime.tv_sec += sys_tz.tz_minuteswest * 60;
time_interpolator_reset();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
}
......@@ -309,92 +308,6 @@ struct timespec timespec_trunc(struct timespec t, unsigned gran)
}
EXPORT_SYMBOL(timespec_trunc);
#ifdef CONFIG_TIME_INTERPOLATION
void getnstimeofday (struct timespec *tv)
{
unsigned long seq,sec,nsec;
do {
seq = read_seqbegin(&xtime_lock);
sec = xtime.tv_sec;
nsec = xtime.tv_nsec+time_interpolator_get_offset();
} while (unlikely(read_seqretry(&xtime_lock, seq)));
while (unlikely(nsec >= NSEC_PER_SEC)) {
nsec -= NSEC_PER_SEC;
++sec;
}
tv->tv_sec = sec;
tv->tv_nsec = nsec;
}
EXPORT_SYMBOL_GPL(getnstimeofday);
int do_settimeofday (struct timespec *tv)
{
time_t wtm_sec, sec = tv->tv_sec;
long wtm_nsec, nsec = tv->tv_nsec;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irq(&xtime_lock);
{
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT;
time_esterror = NTP_PHASE_LIMIT;
time_interpolator_reset();
}
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
EXPORT_SYMBOL(do_settimeofday);
void do_gettimeofday (struct timeval *tv)
{
unsigned long seq, nsec, usec, sec, offset;
do {
seq = read_seqbegin(&xtime_lock);
offset = time_interpolator_get_offset();
sec = xtime.tv_sec;
nsec = xtime.tv_nsec;
} while (unlikely(read_seqretry(&xtime_lock, seq)));
usec = (nsec + offset) / 1000;
while (unlikely(usec >= USEC_PER_SEC)) {
usec -= USEC_PER_SEC;
++sec;
}
tv->tv_sec = sec;
tv->tv_usec = usec;
/*
* Make sure xtime.tv_sec [returned by sys_time()] always
* follows the gettimeofday() result precisely. This
* condition is extremely unlikely, it can hit at most
* once per second:
*/
if (unlikely(xtime.tv_sec != tv->tv_sec)) {
unsigned long flags;
write_seqlock_irqsave(&xtime_lock, flags);
update_wall_time();
write_sequnlock_irqrestore(&xtime_lock, flags);
}
}
EXPORT_SYMBOL(do_gettimeofday);
#else /* CONFIG_TIME_INTERPOLATION */
#ifndef CONFIG_GENERIC_TIME
/*
* Simulate gettimeofday using do_gettimeofday which only allows a timeval
@@ -410,7 +323,6 @@ void getnstimeofday(struct timespec *tv)
}
EXPORT_SYMBOL_GPL(getnstimeofday);
#endif
#endif /* CONFIG_TIME_INTERPOLATION */
/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
* Assumes input in normal date format, i.e. 1980-12-31 23:59:59
@@ -116,11 +116,6 @@ void second_overflow(void)
if (xtime.tv_sec % 86400 == 0) {
xtime.tv_sec--;
wall_to_monotonic.tv_sec++;
/*
* The timer interpolator will make time change
* gradually instead of an immediate jump by one second
*/
time_interpolator_update(-NSEC_PER_SEC);
time_state = TIME_OOP;
printk(KERN_NOTICE "Clock: inserting leap second "
"23:59:60 UTC\n");
@@ -130,11 +125,6 @@
if ((xtime.tv_sec + 1) % 86400 == 0) {
xtime.tv_sec++;
wall_to_monotonic.tv_sec--;
/*
* Use of time interpolator for a gradual change of
* time
*/
time_interpolator_update(NSEC_PER_SEC);
time_state = TIME_WAIT;
printk(KERN_NOTICE "Clock: deleting leap second "
"23:59:59 UTC\n");
@@ -466,10 +466,6 @@ void update_wall_time(void)
second_overflow();
}
/* interpolator bits */
time_interpolator_update(clock->xtime_interval
>> clock->shift);
/* accumulate error between NTP and clock interval */
clock->error += current_tick_length();
clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
@@ -1349,194 +1349,6 @@ void __init init_timers(void)
open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
#ifdef CONFIG_TIME_INTERPOLATION
struct time_interpolator *time_interpolator __read_mostly;
static struct time_interpolator *time_interpolator_list __read_mostly;
static DEFINE_SPINLOCK(time_interpolator_lock);
static inline cycles_t time_interpolator_get_cycles(unsigned int src)
{
unsigned long (*x)(void);
switch (src)
{
case TIME_SOURCE_FUNCTION:
x = time_interpolator->addr;
return x();
case TIME_SOURCE_MMIO64 :
return readq_relaxed((void __iomem *)time_interpolator->addr);
case TIME_SOURCE_MMIO32 :
return readl_relaxed((void __iomem *)time_interpolator->addr);
default: return get_cycles();
}
}
static inline u64 time_interpolator_get_counter(int writelock)
{
unsigned int src = time_interpolator->source;
if (time_interpolator->jitter)
{
cycles_t lcycle;
cycles_t now;
do {
lcycle = time_interpolator->last_cycle;
now = time_interpolator_get_cycles(src);
if (lcycle && time_after(lcycle, now))
return lcycle;
/* When holding the xtime write lock, there's no need
* to add the overhead of the cmpxchg. Readers are
* forced to retry until the write lock is released.
*/
if (writelock) {
time_interpolator->last_cycle = now;
return now;
}
/* Keep track of the last timer value returned. The use of cmpxchg here
* will cause contention in an SMP environment.
*/
} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
return now;
}
else
return time_interpolator_get_cycles(src);
}
void time_interpolator_reset(void)
{
time_interpolator->offset = 0;
time_interpolator->last_counter = time_interpolator_get_counter(1);
}
#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
unsigned long time_interpolator_get_offset(void)
{
/* If we do not have a time interpolator set up then just return zero */
if (!time_interpolator)
return 0;
return time_interpolator->offset +
GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
}
#define INTERPOLATOR_ADJUST 65536
#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST
void time_interpolator_update(long delta_nsec)
{
u64 counter;
unsigned long offset;
/* If there is no time interpolator set up then do nothing */
if (!time_interpolator)
return;
/*
* The interpolator compensates for late ticks by accumulating the late
* time in time_interpolator->offset. A tick earlier than expected will
* lead to a reset of the offset and a corresponding jump of the clock
* forward. Again this only works if the interpolator clock is running
* slightly slower than the regular clock and the tuning logic insures
* that.
*/
counter = time_interpolator_get_counter(1);
offset = time_interpolator->offset +
GET_TI_NSECS(counter, time_interpolator);
if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
time_interpolator->offset = offset - delta_nsec;
else {
time_interpolator->skips++;
time_interpolator->ns_skipped += delta_nsec - offset;
time_interpolator->offset = 0;
}
time_interpolator->last_counter = counter;
/* Tuning logic for time interpolator invoked every minute or so.
* Decrease interpolator clock speed if no skips occurred and an offset is carried.
* Increase interpolator clock speed if we skip too much time.
*/
if (jiffies % INTERPOLATOR_ADJUST == 0)
{
if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
time_interpolator->nsec_per_cyc--;
if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
time_interpolator->nsec_per_cyc++;
time_interpolator->skips = 0;
time_interpolator->ns_skipped = 0;
}
}
static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
if (!time_interpolator)
return 1;
return new->frequency > 2*time_interpolator->frequency ||
(unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}
void
register_time_interpolator(struct time_interpolator *ti)
{
unsigned long flags;
/* Sanity check */
BUG_ON(ti->frequency == 0 || ti->mask == 0);
ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
spin_lock(&time_interpolator_lock);
write_seqlock_irqsave(&xtime_lock, flags);
if (is_better_time_interpolator(ti)) {
time_interpolator = ti;
time_interpolator_reset();
}
write_sequnlock_irqrestore(&xtime_lock, flags);
ti->next = time_interpolator_list;
time_interpolator_list = ti;
spin_unlock(&time_interpolator_lock);
}
void
unregister_time_interpolator(struct time_interpolator *ti)
{
struct time_interpolator *curr, **prev;
unsigned long flags;
spin_lock(&time_interpolator_lock);
prev = &time_interpolator_list;
for (curr = *prev; curr; curr = curr->next) {
if (curr == ti) {
*prev = curr->next;
break;
}
prev = &curr->next;
}
write_seqlock_irqsave(&xtime_lock, flags);
if (ti == time_interpolator) {
/* we lost the best time-interpolator: */
time_interpolator = NULL;
/* find the next-best interpolator */
for (curr = time_interpolator_list; curr; curr = curr->next)
if (is_better_time_interpolator(curr))
time_interpolator = curr;
time_interpolator_reset();
}
write_sequnlock_irqrestore(&xtime_lock, flags);
spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
/**
* msleep - sleep safely even with waitqueue interruptions
* @msecs: Time in milliseconds to sleep for