Commit d369ddd2 authored by David S. Miller

[SPARC64]: Add __read_mostly support.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9126dfde
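For context: __read_mostly places a variable in a dedicated .data.read_mostly section so that globals which are read constantly but written rarely do not share cache lines with write-hot data, avoiding needless cache-line invalidations on SMP. A minimal sketch of the usage pattern this commit enables on sparc64 (the variable and function names below are illustrative, not taken from the commit):

#include <linux/cache.h>        /* __read_mostly */
#include <linux/init.h>         /* __init */

/* Read on every tick, written exactly once at boot: a good candidate
 * for __read_mostly, which groups it with other rarely written data,
 * away from frequently dirtied cache lines. */
static unsigned long example_tick_rate __read_mostly;   /* hypothetical */

void __init example_calibrate(unsigned long rate)
{
        example_tick_rate = rate;       /* the rare write, at boot */
}

unsigned long example_ns_to_ticks(unsigned long ns)
{
        return ns * example_tick_rate;  /* the hot, read-only path */
}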
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -45,8 +45,8 @@ extern void calibrate_delay(void);
 
 /* Please don't make this stuff initdata!!!  --DaveM */
 static unsigned char boot_cpu_id;
 
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
+cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
 static cpumask_t smp_commenced_mask;
 static cpumask_t cpu_callout_map;
@@ -155,7 +155,7 @@ void cpu_panic(void)
 	panic("SMP bolixed\n");
 }
 
-static unsigned long current_tick_offset;
+static unsigned long current_tick_offset __read_mostly;
 
 /* This tick register synchronization scheme is taken entirely from
  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
@@ -1193,8 +1193,8 @@ void smp_send_stop(void)
 {
 }
 
-unsigned long __per_cpu_base;
-unsigned long __per_cpu_shift;
+unsigned long __per_cpu_base __read_mostly;
+unsigned long __per_cpu_shift __read_mostly;
 
 EXPORT_SYMBOL(__per_cpu_base);
 EXPORT_SYMBOL(__per_cpu_shift);
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -73,7 +73,7 @@ static __initdata struct sparc64_tick_ops dummy_tick_ops = {
 	.get_tick	= dummy_get_tick,
 };
 
-struct sparc64_tick_ops *tick_ops = &dummy_tick_ops;
+struct sparc64_tick_ops *tick_ops __read_mostly = &dummy_tick_ops;
 
 #define TICK_PRIV_BIT	(1UL << 63)
 
@@ -195,7 +195,7 @@ static unsigned long tick_add_tick(unsigned long adj, unsigned long offset)
 	return new_tick;
 }
 
-static struct sparc64_tick_ops tick_operations = {
+static struct sparc64_tick_ops tick_operations __read_mostly = {
 	.init_tick	= tick_init_tick,
 	.get_tick	= tick_get_tick,
 	.get_compare	= tick_get_compare,
@@ -276,7 +276,7 @@ static unsigned long stick_add_compare(unsigned long adj)
 	return new_compare;
 }
 
-static struct sparc64_tick_ops stick_operations = {
+static struct sparc64_tick_ops stick_operations __read_mostly = {
 	.init_tick	= stick_init_tick,
 	.get_tick	= stick_get_tick,
 	.get_compare	= stick_get_compare,
@@ -422,7 +422,7 @@ static unsigned long hbtick_add_compare(unsigned long adj)
 	return val;
 }
 
-static struct sparc64_tick_ops hbtick_operations = {
+static struct sparc64_tick_ops hbtick_operations __read_mostly = {
 	.init_tick	= hbtick_init_tick,
 	.get_tick	= hbtick_get_tick,
 	.get_compare	= hbtick_get_compare,
@@ -437,10 +437,9 @@ static struct sparc64_tick_ops hbtick_operations = {
  * NOTE: On SUN5 systems the ticker interrupt comes in using 2
  *       interrupts, one at level14 and one with softint bit 0.
  */
-unsigned long timer_tick_offset;
-unsigned long timer_tick_compare;
+unsigned long timer_tick_offset __read_mostly;
 
-static unsigned long timer_ticks_per_nsec_quotient;
+static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
 
 #define TICK_SIZE (tick_nsec / 1000)
 
@@ -464,7 +463,7 @@ static inline void timer_check_rtc(void)
 
 static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 {
-	unsigned long ticks, pstate;
+	unsigned long ticks, compare, pstate;
 
 	write_seqlock(&xtime_lock);
 
@@ -483,14 +482,14 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 			     : "=r" (pstate)
 			     : "i" (PSTATE_IE));
 
-		timer_tick_compare = tick_ops->add_compare(timer_tick_offset);
+		compare = tick_ops->add_compare(timer_tick_offset);
 		ticks = tick_ops->get_tick();
 
 		/* Restore PSTATE_IE. */
 		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
 				     : /* no outputs */
 				     : "r" (pstate));
-	} while (time_after_eq(ticks, timer_tick_compare));
+	} while (time_after_eq(ticks, compare));
 
 	timer_check_rtc();
@@ -506,11 +505,6 @@ void timer_tick_interrupt(struct pt_regs *regs)
 	do_timer(regs);
 
-	/*
-	 * Only keep timer_tick_offset uptodate, but don't set TICK_CMPR.
-	 */
-	timer_tick_compare = tick_ops->get_compare() + timer_tick_offset;
-
 	timer_check_rtc();
 
 	write_sequnlock(&xtime_lock);
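Beyond the annotations, the time.c hunks above retire the global timer_tick_compare entirely: its declaration is dropped, its update in timer_tick_interrupt() is deleted, and the one place that still needs the value, timer_interrupt(), keeps it in a stack local (compare). That removes a global store from every timer tick. A rough user-space analogue of the before/after (all names hypothetical; the >= comparison stands in for time_after_eq() and ignores wraparound):

#include <stdio.h>

static unsigned long deadline_global;   /* old scheme: dirtied on every tick */

/* Hypothetical stand-ins for tick_ops->add_compare()/get_tick(). */
static unsigned long add_compare(unsigned long offset)
{
        static unsigned long next;
        return next += offset;          /* arm the next deadline */
}

static unsigned long get_tick(void)
{
        static unsigned long tick;
        return tick += 3;               /* pretend time advances */
}

static void handler_old(unsigned long offset)
{
        unsigned long ticks;
        do {
                deadline_global = add_compare(offset);  /* global store per tick */
                ticks = get_tick();
        } while (ticks >= deadline_global);
}

static void handler_new(unsigned long offset)
{
        unsigned long ticks, compare;   /* stack local: no shared line written */
        do {
                compare = add_compare(offset);
                ticks = get_tick();
        } while (ticks >= compare);
}

int main(void)
{
        handler_old(1000);
        handler_new(1000);
        puts("same loop logic; only the new variant avoids the global store");
        return 0;
}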
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -32,6 +32,8 @@ SECTIONS
   .data1   : { *(.data1) }
   . = ALIGN(64);
   .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+  . = ALIGN(64);
+  .data.read_mostly : { *(.data.read_mostly) }
   _edata = .;
   PROVIDE (edata = .);
   .fixup   : { *(.fixup) }
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -13,7 +13,7 @@
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 #endif
 
-#ifdef CONFIG_X86
+#if defined(CONFIG_X86) || defined(CONFIG_SPARC64)
 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
 #else
 #define __read_mostly
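On architectures outside the #if branch, __read_mostly expands to nothing and annotated variables stay in ordinary .data, so the annotation is always safe to use. A quick way to watch the macro take effect outside a kernel build (file and symbol names are hypothetical): compile a standalone object and inspect its symbol table with objdump.

/* check_read_mostly.c -- standalone sketch; build and inspect with:
 *   gcc -c check_read_mostly.c && objdump -t check_read_mostly.o
 * With the attribute active the first symbol lands in .data.read_mostly;
 * if __read_mostly were defined empty, both would sit in plain .data. */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

unsigned long frequently_read_flag __read_mostly = 1;   /* expect .data.read_mostly */
unsigned long ordinary_counter = 1;                     /* expect .data */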