Commit 6684e323 authored by Trond Myklebust

Merge branch 'origin'

parents 7531d692 0560551d
@@ -3267,6 +3267,7 @@ W: http://tpmdd.sourceforge.net
 P:	Marcel Selhorst
 M:	tpm@selhorst.net
 W:	http://www.prosec.rub.de/tpm/
+L:	tpmdd-devel@lists.sourceforge.net
 S:	Maintained
 
 Telecom Clock Driver for MCPL0010
......
@@ -19,6 +19,7 @@ obj-$(CONFIG_X86_CPUID)	+= cpuid.o
 obj-$(CONFIG_MICROCODE)		+= microcode.o
 obj-$(CONFIG_APM)		+= apm.o
 obj-$(CONFIG_X86_SMP)		+= smp.o smpboot.o tsc_sync.o
+obj-$(CONFIG_SMP)		+= smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
 obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o nmi.o
......
@@ -341,15 +341,17 @@ static int powernow_acpi_init(void)
 	pc.val = (unsigned long) acpi_processor_perf->states[0].control;
 	for (i = 0; i < number_scales; i++) {
 		u8 fid, vid;
-		unsigned int speed;
+		struct acpi_processor_px *state =
+			&acpi_processor_perf->states[i];
+		unsigned int speed, speed_mhz;
 
-		pc.val = (unsigned long) acpi_processor_perf->states[i].control;
+		pc.val = (unsigned long) state->control;
 		dprintk ("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
 			 i,
-			 (u32) acpi_processor_perf->states[i].core_frequency,
-			 (u32) acpi_processor_perf->states[i].power,
-			 (u32) acpi_processor_perf->states[i].transition_latency,
-			 (u32) acpi_processor_perf->states[i].control,
+			 (u32) state->core_frequency,
+			 (u32) state->power,
+			 (u32) state->transition_latency,
+			 (u32) state->control,
 			 pc.bits.sgtc);
 
 		vid = pc.bits.vid;
@@ -360,6 +362,18 @@ static int powernow_acpi_init(void)
 		powernow_table[i].index |= (vid << 8); /* upper 8 bits */
 
 		speed = powernow_table[i].frequency;
+		speed_mhz = speed / 1000;
+
+		/* processor_perflib will multiply the MHz value by 1000 to
+		 * get a KHz value (e.g. 1266000). However, powernow-k7 works
+		 * with true KHz values (e.g. 1266768). To ensure that all
+		 * powernow frequencies are available, we must ensure that
+		 * ACPI doesn't restrict them, so we round up the MHz value
+		 * to ensure that perflib's computed KHz value is greater than
+		 * or equal to powernow's KHz value.
+		 */
+		if (speed % 1000 > 0)
+			speed_mhz++;
 
 		if ((fid_codes[fid] % 10)==5) {
 			if (have_a0 == 1)
@@ -368,10 +382,16 @@ static int powernow_acpi_init(void)
 
 		dprintk ("   FID: 0x%x (%d.%dx [%dMHz])  "
 			 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
-			 fid_codes[fid] % 10, speed/1000, vid,
+			 fid_codes[fid] % 10, speed_mhz, vid,
 			 mobile_vid_table[vid]/1000,
 			 mobile_vid_table[vid]%1000);
 
+		if (state->core_frequency != speed_mhz) {
+			state->core_frequency = speed_mhz;
+			dprintk("   Corrected ACPI frequency to %d\n",
+				speed_mhz);
+		}
+
 		if (latency < pc.bits.sgtc)
 			latency = pc.bits.sgtc;
@@ -602,7 +622,7 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
 			result = powernow_acpi_init();
 			if (result) {
 				printk (KERN_INFO PFX "ACPI and legacy methods failed\n");
-				printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.shtml\n");
+				printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.html\n");
 			}
 		} else {
 			/* SGTC use the bus clock as timer */
......
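To make the rounding concrete, a worked example with a made-up table entry:

	/* example values, not from real hardware */
	speed     = 1266768;		/* true powernow kHz value */
	speed_mhz = 1266768 / 1000;	/* = 1266                  */
	/* 1266768 % 1000 = 768 > 0, so speed_mhz becomes 1267;
	 * perflib later computes 1267 * 1000 = 1267000 kHz, which is
	 * >= 1266768, so the powernow frequency is not filtered out.
	 * Truncating to 1266 would have given 1266000 kHz and clipped
	 * the state. */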
@@ -521,7 +521,7 @@ static int check_supported_cpu(unsigned int cpu)
 	if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
 		if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
-		    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_G)) {
+		    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
 			printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
 			goto out;
 		}
......
@@ -46,7 +46,7 @@ struct powernow_k8_data {
 #define CPUID_XFAM			0x0ff00000	/* extended family */
 #define CPUID_XFAM_K8			0
 #define CPUID_XMOD			0x000f0000	/* extended model */
-#define CPUID_XMOD_REV_G		0x00060000
+#define CPUID_XMOD_REV_MASK		0x00080000
 #define CPUID_XFAM_10H			0x00100000	/* family 0x10 */
 #define CPUID_USE_XFAM_XMOD		0x00000f00
 #define CPUID_GET_MAX_CAPABILITIES	0x80000000
......
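To see how check_supported_cpu() applies these masks, a worked decode of a made-up CPUID function-1 eax value:

	/* eax = 0x00060fb1 (hypothetical):
	 *   eax & CPUID_XFAM          = 0x00000000 -> extended family 0 (K8)
	 *   eax & CPUID_USE_XFAM_XMOD = 0x00000f00 -> base family 0xf, so the
	 *                               extended family/model fields apply
	 *   eax & CPUID_XMOD          = 0x00060000 -> extended model 6, which
	 *                               is <= CPUID_XMOD_REV_MASK (0x00080000),
	 *                               so the CPU is accepted
	 */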
@@ -467,7 +467,7 @@ void flush_tlb_all(void)
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-void native_smp_send_reschedule(int cpu)
+static void native_smp_send_reschedule(int cpu)
 {
 	WARN_ON(cpu_is_offline(cpu));
 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -546,7 +546,8 @@ static void __smp_call_function(void (*func) (void *info), void *info,
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int native_smp_call_function_mask(cpumask_t mask,
+static int
+native_smp_call_function_mask(cpumask_t mask,
 				  void (*func)(void *), void *info,
 				  int wait)
 {
@@ -599,60 +600,6 @@ int native_smp_call_function_mask(cpumask_t mask,
 	return 0;
 }
 
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
-{
-	return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on another CPU
- * @cpu: The target CPU.  Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
-{
-	/* prevent preemption and reschedule on another processor */
-	int ret;
-	int me = get_cpu();
-	if (cpu == me) {
-		WARN_ON(1);
-		put_cpu();
-		return -EBUSY;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
 static void stop_this_cpu (void * dummy)
 {
 	local_irq_disable();
@@ -670,7 +617,7 @@ static void stop_this_cpu (void * dummy)
  * this function calls the 'stop' function on all other CPUs in the system.
  */
-void native_smp_send_stop(void)
+static void native_smp_send_stop(void)
 {
 	/* Don't deadlock on the call lock in panic */
 	int nolock = !spin_trylock(&call_lock);
......
@@ -98,9 +98,6 @@ EXPORT_SYMBOL(x86_cpu_to_apicid);
 
 u8 apicid_2_node[MAX_APICID];
 
-DEFINE_PER_CPU(unsigned long, this_cpu_off);
-EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
 /*
  * Trampoline 80x86 program as an array.
  */
@@ -763,25 +760,6 @@ static inline struct task_struct * alloc_idle_task(int cpu)
 #define alloc_idle_task(cpu) fork_idle(cpu)
 #endif
 
-/* Initialize the CPU's GDT.  This is either the boot CPU doing itself
-   (still using the master per-cpu area), or a CPU doing it for a
-   secondary which will soon come up. */
-static __cpuinit void init_gdt(int cpu)
-{
-	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-
-	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
-			(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
-			__per_cpu_offset[cpu], 0xFFFFF,
-			0x80 | DESCTYPE_S | 0x2, 0x8);
-
-	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
-	per_cpu(cpu_number, cpu) = cpu;
-}
-
-/* Defined in head.S */
-extern struct Xgt_desc_struct early_gdt_descr;
-
 static int __cpuinit do_boot_cpu(int apicid, int cpu)
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
......
/*
* SMP stuff which is common to all sub-architectures.
*/
#include <linux/module.h>
#include <asm/smp.h>
DEFINE_PER_CPU(unsigned long, this_cpu_off);
EXPORT_PER_CPU_SYMBOL(this_cpu_off);
/* Initialize the CPU's GDT. This is either the boot CPU doing itself
(still using the master per-cpu area), or a CPU doing it for a
secondary which will soon come up. */
__cpuinit void init_gdt(int cpu)
{
struct desc_struct *gdt = get_cpu_gdt_table(cpu);
pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
__per_cpu_offset[cpu], 0xFFFFF,
0x80 | DESCTYPE_S | 0x2, 0x8);
per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
per_cpu(cpu_number, cpu) = cpu;
}
/**
* smp_call_function(): Run a function on all other CPUs.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @nonatomic: Unused.
* @wait: If true, wait (atomically) until function has completed on other CPUs.
*
* Returns 0 on success, else a negative status code.
*
* If @wait is true, then returns once @func has returned; otherwise
* it returns just before the target cpu calls @func.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
int wait)
{
return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);
/**
* smp_call_function_single - Run a function on another CPU
* @cpu: The target CPU. Cannot be the calling CPU.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @nonatomic: Unused.
* @wait: If true, wait until function has completed on other CPUs.
*
* Returns 0 on success, else a negative status code.
*
* If @wait is true, then returns once @func has returned; otherwise
* it returns just before the target cpu calls @func.
*/
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int nonatomic, int wait)
{
/* prevent preemption and reschedule on another processor */
int ret;
int me = get_cpu();
if (cpu == me) {
WARN_ON(1);
put_cpu();
return -EBUSY;
}
ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
put_cpu();
return ret;
}
EXPORT_SYMBOL(smp_call_function_single);
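For context, a minimal, hypothetical caller of the smp_call_function_single() kept here (the callback and variable names are made up):

	static void remote_read_tsc(void *info)
	{
		/* runs on the target CPU, in IPI context */
		rdtscll(*(u64 *)info);
	}

	u64 tsc;
	/* nonatomic is unused; wait=1 blocks until remote_read_tsc returns */
	int err = smp_call_function_single(1, remote_read_tsc, &tsc, 0, 1);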
@@ -27,7 +27,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/arch_hooks.h>
-#include <asm/pda.h>
 
 /* TLB state -- visible externally, indexed physically */
 DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
@@ -422,7 +421,7 @@ find_smp_config(void)
 	      VOYAGER_SUS_IN_CONTROL_PORT);
 
 	current_thread_info()->cpu = boot_cpu_id;
-	write_pda(cpu_number, boot_cpu_id);
+	x86_write_percpu(cpu_number, boot_cpu_id);
 }
 
 /*
@@ -435,7 +434,7 @@ smp_store_cpu_info(int id)
 
 	*c = boot_cpu_data;
 
-	identify_cpu(c);
+	identify_secondary_cpu(c);
 }
 
 /* set up the trampoline and return the physical address of the code */
@@ -459,7 +458,7 @@ start_secondary(void *unused)
 	/* external functions not defined in the headers */
 	extern void calibrate_delay(void);
 
-	secondary_cpu_init();
+	cpu_init();
 
 	/* OK, we're in the routine */
 	ack_CPI(VIC_CPU_BOOT_CPI);
@@ -572,7 +571,9 @@ do_boot_cpu(__u8 cpu)
 	/* init_tasks (in sched.c) is indexed logically */
 	stack_start.esp = (void *) idle->thread.esp;
 
-	init_gdt(cpu, idle);
+	init_gdt(cpu);
+	per_cpu(current_task, cpu) = idle;
+	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	irq_ctx_init(cpu);
 
 	/* Note: Don't modify initial ss override */
@@ -859,7 +860,7 @@ smp_invalidate_interrupt(void)
 
 /* This routine is called with a physical cpu mask */
 static void
-flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
 		  unsigned long va)
 {
 	int stuck = 50000;
@@ -912,7 +913,7 @@ flush_tlb_current_task(void)
 	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 	local_flush_tlb();
 	if (cpu_mask)
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -934,7 +935,7 @@ flush_tlb_mm (struct mm_struct * mm)
 			leave_mm(smp_processor_id());
 	}
 	if (cpu_mask)
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -955,7 +956,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 	}
 
 	if (cpu_mask)
-		flush_tlb_others(cpu_mask, mm, va);
+		voyager_flush_tlb_others(cpu_mask, mm, va);
 
 	preempt_enable();
 }
@@ -1044,10 +1045,12 @@ smp_call_function_interrupt(void)
 }
 
 static int
-__smp_call_function_mask (void (*func) (void *info), void *info, int retry,
-			  int wait, __u32 mask)
+voyager_smp_call_function_mask (cpumask_t cpumask,
+				void (*func) (void *info), void *info,
+				int wait)
 {
 	struct call_data_struct data;
+	u32 mask = cpus_addr(cpumask)[0];
 
 	mask &= ~(1<<smp_processor_id());
@@ -1083,47 +1086,6 @@ __smp_call_function_mask (void (*func) (void *info), void *info, int retry,
 	return 0;
 }
 
-/* Call this function on all CPUs using the function_interrupt above
-    <func> The function to run. This must be fast and non-blocking.
-    <info> An arbitrary pointer to pass to the function.
-    <retry> If true, keep retrying until ready.
-    <wait> If true, wait until function has completed on other CPUs.
-    [RETURNS] 0 on success, else a negative status code. Does not return until
-    remote CPUs are nearly ready to execute <<func>> or are or have executed.
-*/
-int
-smp_call_function(void (*func) (void *info), void *info, int retry,
-		  int wait)
-{
-	__u32 mask = cpus_addr(cpu_online_map)[0];
-
-	return __smp_call_function_mask(func, info, retry, wait, mask);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/*
- * smp_call_function_single - Run a function on another CPU
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Currently unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Retrurns 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-int
-smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			 int nonatomic, int wait)
-{
-	__u32 mask = 1 << cpu;
-
-	return __smp_call_function_mask(func, info, nonatomic, wait, mask);
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
 /* Sorry about the name.  In an APIC based system, the APICs
  * themselves are programmed to send a timer interrupt.  This is used
  * by linux to reschedule the processor.  Voyager doesn't have this,
@@ -1237,8 +1199,8 @@ smp_alloc_memory(void)
 }
 
 /* send a reschedule CPI to one CPU by physical CPU number*/
-void
-smp_send_reschedule(int cpu)
+static void
+voyager_smp_send_reschedule(int cpu)
 {
 	send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
 }
@@ -1267,8 +1229,8 @@ safe_smp_processor_id(void)
 }
 
 /* broadcast a halt to all other CPUs */
-void
-smp_send_stop(void)
+static void
+voyager_smp_send_stop(void)
 {
 	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
 }
@@ -1930,23 +1892,26 @@ smp_voyager_power_off(void *dummy)
 		smp_stop_cpu_function(NULL);
 }
 
-void __init
-smp_prepare_cpus(unsigned int max_cpus)
+static void __init
+voyager_smp_prepare_cpus(unsigned int max_cpus)
 {
 	/* FIXME: ignore max_cpus for now */
 	smp_boot_cpus();
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+static void __devinit voyager_smp_prepare_boot_cpu(void)
 {
+	init_gdt(smp_processor_id());
+	switch_to_new_gdt();
+
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), cpu_callout_map);
 	cpu_set(smp_processor_id(), cpu_possible_map);
 	cpu_set(smp_processor_id(), cpu_present_map);
 }
 
-int __devinit
-__cpu_up(unsigned int cpu)
+static int __devinit
+voyager_cpu_up(unsigned int cpu)
 {
 	/* This only works at boot for x86.  See "rewrite" above. */
 	if (cpu_isset(cpu, smp_commenced_mask))
@@ -1962,8 +1927,8 @@ __cpu_up(unsigned int cpu)
 	return 0;
 }
 
-void __init
-smp_cpus_done(unsigned int max_cpus)
+static void __init
+voyager_smp_cpus_done(unsigned int max_cpus)
 {
 	zap_low_mappings();
 }
@@ -1972,5 +1937,16 @@ void __init
 smp_setup_processor_id(void)
 {
 	current_thread_info()->cpu = hard_smp_processor_id();
-	write_pda(cpu_number, hard_smp_processor_id());
+	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
+
+struct smp_ops smp_ops = {
+	.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
+	.smp_prepare_cpus = voyager_smp_prepare_cpus,
+	.cpu_up = voyager_cpu_up,
+	.smp_cpus_done = voyager_smp_cpus_done,
+
+	.smp_send_stop = voyager_smp_send_stop,
+	.smp_send_reschedule = voyager_smp_send_reschedule,
+	.smp_call_function_mask = voyager_smp_call_function_mask,
+};
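For context, generic code reaches these routines through the ops table rather than by name; roughly (following the include/asm-i386/smp.h wrappers of this era, shown here only as a sketch):

	extern struct smp_ops smp_ops;

	static inline void smp_send_reschedule(int cpu)
	{
		/* dispatch to the machine-specific implementation
		 * (native or, as above, voyager) */
		smp_ops.smp_send_reschedule(cpu);
	}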
@@ -181,7 +181,7 @@ EXPORT_SYMBOL(strnlen_user);
  * Zero Userspace
  */
 
-unsigned long clear_user(void __user *to, unsigned long n)
+unsigned long __clear_user(void __user *to, unsigned long n)
 {
 	unsigned long res;
 
@@ -219,4 +219,4 @@ unsigned long clear_user(void __user *to, unsigned long n)
 	return res;
 }
-EXPORT_SYMBOL(clear_user);
+EXPORT_SYMBOL(__clear_user);
@@ -3802,7 +3802,6 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node)
 
 	return ret;
 }
-EXPORT_SYMBOL(current_io_context);
 
 /*
  * If the current task has no IO context then create one and initialise it.
......
@@ -384,9 +384,9 @@ static struct agp_device_ids via_agp_device_ids[] __devinitdata =
 		.device_id	= PCI_DEVICE_ID_VIA_P4M800CE,
 		.chipset_name	= "VT3314",
 	},
-	/* CX700 */
+	/* VT3324 / CX700 */
 	{
-		.device_id  = PCI_DEVICE_ID_VIA_CX700,
+		.device_id  = PCI_DEVICE_ID_VIA_VT3324,
 		.chipset_name   = "CX700",
 	},
 	/* VT3336 */
@@ -540,7 +540,7 @@ static const struct pci_device_id agp_via_pci_table[] = {
 	ID(PCI_DEVICE_ID_VIA_83_87XX_1),
 	ID(PCI_DEVICE_ID_VIA_3296_0),
 	ID(PCI_DEVICE_ID_VIA_P4M800CE),
-	ID(PCI_DEVICE_ID_VIA_CX700),
+	ID(PCI_DEVICE_ID_VIA_VT3324),
 	ID(PCI_DEVICE_ID_VIA_VT3336),
 	ID(PCI_DEVICE_ID_VIA_P4M890),
 	{ }
......
@@ -346,12 +346,33 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
 		complete(&id_priv->comp);
 }
 
-static void cma_release_remove(struct rdma_id_private *id_priv)
+static int cma_disable_remove(struct rdma_id_private *id_priv,
+			      enum cma_state state)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&id_priv->lock, flags);
+	if (id_priv->state == state) {
+		atomic_inc(&id_priv->dev_remove);
+		ret = 0;
+	} else
+		ret = -EINVAL;
+	spin_unlock_irqrestore(&id_priv->lock, flags);
+	return ret;
+}
+
+static void cma_enable_remove(struct rdma_id_private *id_priv)
 {
 	if (atomic_dec_and_test(&id_priv->dev_remove))
 		wake_up(&id_priv->wait_remove);
 }
 
+static int cma_has_cm_dev(struct rdma_id_private *id_priv)
+{
+	return (id_priv->id.device && id_priv->cm_id.ib);
+}
+
 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 				  void *context, enum rdma_port_space ps)
 {
@@ -884,9 +905,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	struct rdma_cm_event event;
 	int ret = 0;
 
-	atomic_inc(&id_priv->dev_remove);
-	if (!cma_comp(id_priv, CMA_CONNECT))
-		goto out;
+	if (cma_disable_remove(id_priv, CMA_CONNECT))
+		return 0;
 
 	memset(&event, 0, sizeof event);
 	switch (ib_event->event) {
@@ -942,12 +962,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 out:
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	return ret;
 }
 
@@ -1057,11 +1077,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
-	atomic_inc(&listen_id->dev_remove);
-	if (!cma_comp(listen_id, CMA_LISTEN)) {
-		ret = -ECONNABORTED;
-		goto out;
-	}
+	if (cma_disable_remove(listen_id, CMA_LISTEN))
+		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id->id.ps);
@@ -1101,11 +1118,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 
 release_conn_id:
 	cma_exch(conn_id, CMA_DESTROYING);
-	cma_release_remove(conn_id);
+	cma_enable_remove(conn_id);
 	rdma_destroy_id(&conn_id->id);
 
 out:
-	cma_release_remove(listen_id);
+	cma_enable_remove(listen_id);
 	return ret;
 }
 
@@ -1171,9 +1188,10 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	struct sockaddr_in *sin;
 	int ret = 0;
 
-	memset(&event, 0, sizeof event);
-	atomic_inc(&id_priv->dev_remove);
+	if (cma_disable_remove(id_priv, CMA_CONNECT))
+		return 0;
 
+	memset(&event, 0, sizeof event);
 	switch (iw_event->event) {
 	case IW_CM_EVENT_CLOSE:
 		event.event = RDMA_CM_EVENT_DISCONNECTED;
@@ -1214,12 +1232,12 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.iw = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	return ret;
 }
 
@@ -1234,11 +1252,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	int ret;
 
 	listen_id = cm_id->context;
-	atomic_inc(&listen_id->dev_remove);
-	if (!cma_comp(listen_id, CMA_LISTEN)) {
-		ret = -ECONNABORTED;
-		goto out;
-	}
+	if (cma_disable_remove(listen_id, CMA_LISTEN))
+		return -ECONNABORTED;
 
 	/* Create a new RDMA id for the new IW CM ID */
 	new_cm_id = rdma_create_id(listen_id->id.event_handler,
@@ -1255,13 +1270,13 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
 	if (!dev) {
 		ret = -EADDRNOTAVAIL;
-		cma_release_remove(conn_id);
+		cma_enable_remove(conn_id);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
 	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
 	if (ret) {
-		cma_release_remove(conn_id);
+		cma_enable_remove(conn_id);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
@@ -1270,7 +1285,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
 	if (ret) {
-		cma_release_remove(conn_id);
+		cma_enable_remove(conn_id);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
@@ -1293,14 +1308,14 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
 		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
+		cma_enable_remove(conn_id);
 		rdma_destroy_id(&conn_id->id);
 	}
 
 out:
 	if (dev)
 		dev_put(dev);
-	cma_release_remove(listen_id);
+	cma_enable_remove(listen_id);
 	return ret;
 }
 
@@ -1519,7 +1534,7 @@ static void cma_work_handler(struct work_struct *_work)
 		destroy = 1;
 	}
 out:
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	cma_deref_id(id_priv);
 	if (destroy)
 		rdma_destroy_id(&id_priv->id);
@@ -1711,13 +1726,13 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
 	if (id_priv->id.event_handler(&id_priv->id, &event)) {
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		cma_deref_id(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return;
 	}
out:
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	cma_deref_id(id_priv);
 }
 
@@ -2042,11 +2057,10 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
 	int ret = 0;
 
-	memset(&event, 0, sizeof event);
-	atomic_inc(&id_priv->dev_remove);
-	if (!cma_comp(id_priv, CMA_CONNECT))
-		goto out;
+	if (cma_disable_remove(id_priv, CMA_CONNECT))
+		return 0;
 
+	memset(&event, 0, sizeof event);
 	switch (ib_event->event) {
 	case IB_CM_SIDR_REQ_ERROR:
 		event.event = RDMA_CM_EVENT_UNREACHABLE;
@@ -2084,12 +2098,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
out:
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	return ret;
 }
 
@@ -2413,7 +2427,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT))
+	if (!cma_has_cm_dev(id_priv))
 		return -EINVAL;
 
 	switch (id->device->node_type) {
@@ -2435,7 +2449,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT))
+	if (!cma_has_cm_dev(id_priv))
 		return -EINVAL;
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2466,8 +2480,7 @@ int rdma_disconnect(struct rdma_cm_id *id)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT) &&
-	    !cma_comp(id_priv, CMA_DISCONNECT))
+	if (!cma_has_cm_dev(id_priv))
 		return -EINVAL;
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2499,10 +2512,9 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	int ret;
 
 	id_priv = mc->id_priv;
-	atomic_inc(&id_priv->dev_remove);
-	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
-	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
-		goto out;
+	if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
+	    cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
+		return 0;
 
 	if (!status && id_priv->id.qp)
 		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
@@ -2524,12 +2536,12 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	ret = id_priv->id.event_handler(&id_priv->id, &event);
 	if (ret) {
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return 0;
 	}
-out:
-	cma_release_remove(id_priv);
+
+	cma_enable_remove(id_priv);
 	return 0;
 }
......
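For context, a minimal sketch of the pattern the event handlers now share (the handler itself is hypothetical; cma_disable_remove()/cma_enable_remove() are the helpers added above):

	static int some_event_handler(struct rdma_id_private *id_priv)
	{
		/* takes the dev_remove reference only while the id is in the
		 * expected state; otherwise the event is dropped up front */
		if (cma_disable_remove(id_priv, CMA_CONNECT))
			return 0;

		/* ... handle the event ... */

		cma_enable_remove(id_priv);	/* wake any waiting remover */
		return 0;
	}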
@@ -277,6 +277,7 @@ void ehca_cleanup_mrmw_cache(void);
 
 extern spinlock_t ehca_qp_idr_lock;
 extern spinlock_t ehca_cq_idr_lock;
+extern spinlock_t hcall_lock;
 extern struct idr ehca_qp_idr;
 extern struct idr ehca_cq_idr;
......
@@ -517,12 +517,11 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 		else {
 			struct ehca_cq *cq = eq->eqe_cache[i].cq;
 			comp_event_callback(cq);
-			spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+			spin_lock(&ehca_cq_idr_lock);
 			cq->nr_events--;
 			if (!cq->nr_events)
 				wake_up(&cq->wait_completion);
-			spin_unlock_irqrestore(&ehca_cq_idr_lock,
-					       flags);
+			spin_unlock(&ehca_cq_idr_lock);
 		}
 	} else {
 		ehca_dbg(&shca->ib_device, "Got non completion event");
@@ -711,6 +710,7 @@ static void destroy_comp_task(struct ehca_comp_pool *pool,
 		kthread_stop(task);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 static void take_over_work(struct ehca_comp_pool *pool,
 			   int cpu)
 {
@@ -735,7 +735,6 @@ static void take_over_work(struct ehca_comp_pool *pool,
 
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int comp_pool_callback(struct notifier_block *nfb,
 			      unsigned long action,
 			      void *hcpu)
......
@@ -52,7 +52,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0022");
+MODULE_VERSION("SVNEHCA_0023");
 
 int ehca_open_aqp1     = 0;
 int ehca_debug_level   = 0;
@@ -62,7 +62,7 @@ int ehca_use_hp_mr     = 0;
 int ehca_port_act_time = 30;
 int ehca_poll_all_eqs  = 1;
 int ehca_static_rate   = -1;
-int ehca_scaling_code  = 1;
+int ehca_scaling_code  = 0;
 
 module_param_named(open_aqp1,     ehca_open_aqp1,     int, 0);
 module_param_named(debug_level,   ehca_debug_level,   int, 0);
@@ -98,6 +98,7 @@ MODULE_PARM_DESC(scaling_code,
 
 spinlock_t ehca_qp_idr_lock;
 spinlock_t ehca_cq_idr_lock;
+spinlock_t hcall_lock;
 DEFINE_IDR(ehca_qp_idr);
 DEFINE_IDR(ehca_cq_idr);
 
@@ -453,15 +454,14 @@ static ssize_t ehca_store_debug_level(struct device_driver *ddp,
 DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
 	    ehca_show_debug_level, ehca_store_debug_level);
 
-void ehca_create_driver_sysfs(struct ibmebus_driver *drv)
-{
-	driver_create_file(&drv->driver, &driver_attr_debug_level);
-}
+static struct attribute *ehca_drv_attrs[] = {
+	&driver_attr_debug_level.attr,
+	NULL
+};
 
-void ehca_remove_driver_sysfs(struct ibmebus_driver *drv)
-{
-	driver_remove_file(&drv->driver, &driver_attr_debug_level);
-}
+static struct attribute_group ehca_drv_attr_grp = {
+	.attrs = ehca_drv_attrs
+};
 
 #define EHCA_RESOURCE_ATTR(name) \
 static ssize_t  ehca_show_##name(struct device *dev, \
@@ -523,44 +523,28 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
 }
 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
 
+static struct attribute *ehca_dev_attrs[] = {
+	&dev_attr_adapter_handle.attr,
+	&dev_attr_num_ports.attr,
+	&dev_attr_hw_ver.attr,
+	&dev_attr_max_eq.attr,
+	&dev_attr_cur_eq.attr,
+	&dev_attr_max_cq.attr,
+	&dev_attr_cur_cq.attr,
+	&dev_attr_max_qp.attr,
+	&dev_attr_cur_qp.attr,
+	&dev_attr_max_mr.attr,
+	&dev_attr_cur_mr.attr,
+	&dev_attr_max_mw.attr,
+	&dev_attr_cur_mw.attr,
+	&dev_attr_max_pd.attr,
+	&dev_attr_max_ah.attr,
+	NULL
+};
 
-void ehca_create_device_sysfs(struct ibmebus_dev *dev)
-{
-	device_create_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
-	device_create_file(&dev->ofdev.dev, &dev_attr_num_ports);
-	device_create_file(&dev->ofdev.dev, &dev_attr_hw_ver);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_eq);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_eq);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_cq);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_cq);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_qp);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_qp);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_mr);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_mr);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_mw);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_mw);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_pd);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_ah);
-}
+static struct attribute_group ehca_dev_attr_grp = {
+	.attrs = ehca_dev_attrs
+};
 
-void ehca_remove_device_sysfs(struct ibmebus_dev *dev)
-{
-	device_remove_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_num_ports);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_hw_ver);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_eq);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_eq);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_cq);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_cq);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_qp);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_qp);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_mr);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mr);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_mw);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mw);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_pd);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_ah);
-}
 
 static int __devinit ehca_probe(struct ibmebus_dev *dev,
 				const struct of_device_id *id)
@@ -668,7 +652,10 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
 		}
 	}
 
-	ehca_create_device_sysfs(dev);
+	ret = sysfs_create_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
+	if (ret) /* only complain; we can live without attributes */
+		ehca_err(&shca->ib_device,
+			 "Cannot create device attributes  ret=%d", ret);
 
 	spin_lock(&shca_list_lock);
 	list_add(&shca->shca_list, &shca_list);
@@ -720,7 +707,7 @@ static int __devexit ehca_remove(struct ibmebus_dev *dev)
 	struct ehca_shca *shca = dev->ofdev.dev.driver_data;
 	int ret;
 
-	ehca_remove_device_sysfs(dev);
+	sysfs_remove_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
 
 	if (ehca_open_aqp1 == 1) {
 		int i;
@@ -812,11 +799,12 @@ int __init ehca_module_init(void)
 	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	       "(Rel.: SVNEHCA_0022)\n");
+	       "(Rel.: SVNEHCA_0023)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);
 	spin_lock_init(&ehca_cq_idr_lock);
+	spin_lock_init(&hcall_lock);
 
 	INIT_LIST_HEAD(&shca_list);
 	spin_lock_init(&shca_list_lock);
@@ -838,7 +826,9 @@ int __init ehca_module_init(void)
 		goto module_init2;
 	}
 
-	ehca_create_driver_sysfs(&ehca_driver);
+	ret = sysfs_create_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
+	if (ret) /* only complain; we can live without attributes */
+		ehca_gen_err("Cannot create driver attributes  ret=%d", ret);
 
 	if (ehca_poll_all_eqs != 1) {
 		ehca_gen_err("WARNING!!!");
@@ -865,7 +855,7 @@ void __exit ehca_module_exit(void)
 	if (ehca_poll_all_eqs == 1)
 		del_timer_sync(&poll_eqs_timer);
 
-	ehca_remove_driver_sysfs(&ehca_driver);
+	sysfs_remove_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
 	ibmebus_unregister_driver(&ehca_driver);
 	ehca_destroy_slab_caches();
......
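For reference, a minimal sketch of the attribute-group pattern the driver converts to (the attribute name is made up):

	static DEVICE_ATTR(foo, S_IRUGO, show_foo, NULL);	/* hypothetical */

	static struct attribute *my_attrs[] = {
		&dev_attr_foo.attr,
		NULL				/* must be NULL-terminated */
	};

	static struct attribute_group my_attr_grp = {
		.attrs = my_attrs
	};

	/* one call creates every file, one call removes them all */
	ret = sysfs_create_group(&kobj, &my_attr_grp);
	/* ... later, on teardown ... */
	sysfs_remove_group(&kobj, &my_attr_grp);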
@@ -523,6 +523,8 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 		goto create_qp_exit1;
 	}
 
+	my_qp->ib_qp.qp_num = my_qp->real_qp_num;
+
 	switch (init_attr->qp_type) {
 	case IB_QPT_RC:
 		if (isdaqp == 0) {
@@ -568,7 +570,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 			parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr;
 			parms.act_nr_send_sges = init_attr->cap.max_send_sge;
 			parms.act_nr_recv_sges = init_attr->cap.max_recv_sge;
-			my_qp->real_qp_num =
+			my_qp->ib_qp.qp_num =
 				(init_attr->qp_type == IB_QPT_SMI) ? 0 : 1;
 		}
 
@@ -595,7 +597,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 	my_qp->ib_qp.recv_cq = init_attr->recv_cq;
 	my_qp->ib_qp.send_cq = init_attr->send_cq;
 
-	my_qp->ib_qp.qp_num = my_qp->real_qp_num;
 	my_qp->ib_qp.qp_type = init_attr->qp_type;
 
 	my_qp->qp_type = init_attr->qp_type;
@@ -968,17 +969,21 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 			((ehca_mult - 1) / ah_mult) : 0;
 	else
 		mqpcb->max_static_rate = 0;
-
 	update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
 
+	/*
+	 * Always supply the GRH flag, even if it's zero, to give the
+	 * hypervisor a clear "yes" or "no" instead of a "perhaps"
+	 */
+	update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
+
 	/*
 	 * only if GRH is TRUE we might consider SOURCE_GID_IDX
 	 * and DEST_GID otherwise phype will return H_ATTR_PARM!!!
 	 */
 	if (attr->ah_attr.ah_flags == IB_AH_GRH) {
-		mqpcb->send_grh_flag = 1 << 31;
-		update_mask |=
-			EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
+		mqpcb->send_grh_flag = 1;
+
 		mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
 		update_mask |=
 			EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
......
@@ -154,7 +154,8 @@ static long ehca_plpar_hcall9(unsigned long opcode,
 			      unsigned long arg9)
 {
 	long ret;
-	int i, sleep_msecs;
+	int i, sleep_msecs, lock_is_set = 0;
+	unsigned long flags;
 
 	ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
 		     "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
@@ -162,10 +163,18 @@ static long ehca_plpar_hcall9(unsigned long opcode,
 		     arg8, arg9);
 
 	for (i = 0; i < 5; i++) {
+		if ((opcode == H_ALLOC_RESOURCE) && (arg2 == 5)) {
+			spin_lock_irqsave(&hcall_lock, flags);
+			lock_is_set = 1;
+		}
+
 		ret = plpar_hcall9(opcode, outs,
 				   arg1, arg2, arg3, arg4, arg5,
 				   arg6, arg7, arg8, arg9);
 
+		if (lock_is_set)
+			spin_unlock_irqrestore(&hcall_lock, flags);
+
 		if (H_IS_LONG_BUSY(ret)) {
 			sleep_msecs = get_longbusy_msecs(ret);
 			msleep_interruptible(sleep_msecs);
@@ -193,11 +202,11 @@ static long ehca_plpar_hcall9(unsigned long opcode,
 			 opcode, ret, outs[0], outs[1], outs[2], outs[3],
 			 outs[4], outs[5], outs[6], outs[7], outs[8]);
 		return ret;
 	}
 
 	return H_BUSY;
 }
 
 u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
 			     struct ehca_pfeq *pfeq,
 			     const u32 neq_control,
......
@@ -747,7 +747,6 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
 
 static int ipath_pe_intconfig(struct ipath_devdata *dd)
 {
-	u64 val;
 	u32 chiprev;
 
 	/*
@@ -760,9 +759,9 @@ static int ipath_pe_intconfig(struct ipath_devdata *dd)
 	if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) {
 		/* Rev2+ reports extra errors via internal GPIO pins */
 		dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
-		val |= IPATH_GPIO_ERRINTR_MASK;
-		ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val);
+		dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+				 dd->ipath_gpio_mask);
 	}
 	return 0;
 }
......
@@ -1056,7 +1056,7 @@ irqreturn_t ipath_intr(int irq, void *data)
 			gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT);
 			chk0rcv = 1;
 		}
-		if (unlikely(gpiostatus)) {
+		if (gpiostatus) {
 			/*
 			 * Some unexpected bits remain. If they could have
 			 * caused the interrupt, complain and clear.
@@ -1065,9 +1065,8 @@ irqreturn_t ipath_intr(int irq, void *data)
 			 * GPIO interrupts, possibly on a "three strikes"
 			 * basis.
 			 */
-			u32 mask;
-			mask = ipath_read_kreg32(
-				dd, dd->ipath_kregs->kr_gpio_mask);
+			const u32 mask = (u32) dd->ipath_gpio_mask;
+
 			if (mask & gpiostatus) {
 				ipath_dbg("Unexpected GPIO IRQ bits %x\n",
 					  gpiostatus & mask);
......
@@ -397,6 +397,8 @@ struct ipath_devdata {
 	unsigned long ipath_pioavailshadow[8];
 	/* shadow of kr_gpio_out, for rmw ops */
 	u64 ipath_gpio_out;
+	/* shadow the gpio mask register */
+	u64 ipath_gpio_mask;
 	/* kr_revision shadow */
 	u64 ipath_revision;
 	/*
......
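The point of the shadow is to replace a read-modify-write of the chip register with a cached copy; in sketch form, following the hunks above and below:

	/* before: read the register back, modify, write
	 *	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
	 *	val |= bit;
	 *	ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, val);
	 * after: update the shadow and write it out; no chip read needed */
	dd->ipath_gpio_mask |= bit;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
			 dd->ipath_gpio_mask);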
@@ -1387,13 +1387,12 @@ static int enable_timer(struct ipath_devdata *dd)
 	 * processing.
 	 */
 	if (dd->ipath_flags & IPATH_GPIO_INTR) {
-		u64 val;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
 				 0x2074076542310ULL);
 		/* Enable GPIO bit 2 interrupt */
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
-		val |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
-		ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val);
+		dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+				 dd->ipath_gpio_mask);
 	}
 
 	init_timer(&dd->verbs_timer);
@@ -1412,8 +1411,9 @@ static int disable_timer(struct ipath_devdata *dd)
 		u64 val;
 		/* Disable GPIO bit 2 interrupt */
 		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
-		val &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
-		ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val);
+		dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+				 dd->ipath_gpio_mask);
 		/*
 		 * We might want to undo changes to debugportselect,
 		 * but how?
......
@@ -489,6 +489,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
 	if (!ibdev->uar_map)
 		goto err_uar;
+	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
 
 	INIT_LIST_HEAD(&ibdev->pgdir_list);
 	mutex_init(&ibdev->pgdir_mutex);
......
@@ -284,7 +284,7 @@ void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 {
 	struct mthca_cqe *cqe;
 	u32 prod_index;
-	int nfreed = 0;
+	int i, nfreed = 0;
 
 	spin_lock_irq(&cq->lock);
 
@@ -321,6 +321,8 @@ void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 	}
 
 	if (nfreed) {
+		for (i = 0; i < nfreed; ++i)
+			set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
 		wmb();
 		cq->cons_index += nfreed;
 		update_cons_index(dev, cq, nfreed);
......
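The mthca_cq_clean() fix hands ownership of every CQE freed by the clean-up loop back to the hardware before the consumer index moves past them; the existing wmb() then orders those ownership updates ahead of the index update. A sketch of the ownership-byte convention (names and the 0x80 encoding are assumptions modelled on typical HCA CQEs, not quoted from mthca):

#define CQE_OWNER_HW	0x80	/* assumed encoding: high bit set = HW owns it */

struct sw_cqe {
	u8 data[31];
	u8 owner;	/* last byte of the 32-byte CQE carries ownership */
};

static void cqe_return_to_hw(struct sw_cqe *cqe)
{
	cqe->owner = CQE_OWNER_HW;	/* HW may now overwrite this entry */
}

Without the loop, the entries skipped over by cons_index would still look software-owned, and the HCA could eventually wrap around onto CQEs it believes it must not write.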
...@@ -1862,6 +1862,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -1862,6 +1862,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
dev->kar + MTHCA_RECEIVE_DOORBELL, dev->kar + MTHCA_RECEIVE_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
qp->rq.next_ind = ind;
qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
size0 = 0; size0 = 0;
} }
......
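Context for the one-line mthca_tavor_post_receive() fix: Tavor rings the receive doorbell every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB descriptors, and each doorbell reports the first index of its chunk, so the chunk-start bookkeeping must restart after every ring. A generic sketch of the chunked-doorbell loop — struct ring, struct desc, write_desc() and ring_doorbell() are all hypothetical helpers, not mthca API:

#define CHUNK	256	/* descriptors per doorbell, as on Tavor */

struct desc;
struct ring { int next_ind; int size; };

extern void write_desc(struct ring *r, int ind, const struct desc *d);
extern void ring_doorbell(struct ring *r, int first_ind, int count);

static void post_many(struct ring *r, const struct desc *d, int n)
{
	int ind = r->next_ind, first = ind, cnt = 0, i;

	for (i = 0; i < n; ++i) {
		write_desc(r, ind, &d[i]);
		ind = (ind + 1) % r->size;
		if (++cnt == CHUNK) {
			ring_doorbell(r, first, cnt);
			first = ind;	/* the fixed line: restart the chunk,
					 * or later doorbells replay stale
					 * indices */
			cnt = 0;
		}
	}
	if (cnt)
		ring_doorbell(r, first, cnt);
	r->next_ind = ind;
}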
...@@ -257,10 +257,11 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even ...@@ -257,10 +257,11 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
cm_id->context = p; cm_id->context = p;
p->jiffies = jiffies; p->jiffies = jiffies;
spin_lock_irq(&priv->lock); spin_lock_irq(&priv->lock);
list_add(&p->list, &priv->cm.passive_ids); if (list_empty(&priv->cm.passive_ids))
spin_unlock_irq(&priv->lock);
queue_delayed_work(ipoib_workqueue, queue_delayed_work(ipoib_workqueue,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY); &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
list_add(&p->list, &priv->cm.passive_ids);
spin_unlock_irq(&priv->lock);
return 0; return 0;
err_rep: err_rep:
...@@ -378,8 +379,6 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -378,8 +379,6 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (!list_empty(&p->list)) if (!list_empty(&p->list))
list_move(&p->list, &priv->cm.passive_ids); list_move(&p->list, &priv->cm.passive_ids);
spin_unlock_irqrestore(&priv->lock, flags); spin_unlock_irqrestore(&priv->lock, flags);
queue_delayed_work(ipoib_workqueue,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
} }
} }
...@@ -1100,6 +1099,10 @@ static void ipoib_cm_stale_task(struct work_struct *work) ...@@ -1100,6 +1099,10 @@ static void ipoib_cm_stale_task(struct work_struct *work)
kfree(p); kfree(p);
spin_lock_irq(&priv->lock); spin_lock_irq(&priv->lock);
} }
if (!list_empty(&priv->cm.passive_ids))
queue_delayed_work(ipoib_workqueue,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
spin_unlock_irq(&priv->lock); spin_unlock_irq(&priv->lock);
} }
......
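The ipoib changes convert the stale-connection scan from "rearm on every receive" to a self-rearming work item: the producer kicks the scanner only on the empty-to-non-empty transition, and the scanner re-queues itself only while entries remain, both under priv->lock so no wakeup is lost. The shape of the pattern, with placeholder names throughout (foo_priv, items, scan_work, wq, DELAY):

extern struct workqueue_struct *wq;	/* placeholder workqueue */
#define DELAY	(HZ / 4)		/* placeholder interval */

struct foo_entry { struct list_head list; };

struct foo_priv {
	spinlock_t lock;
	struct list_head items;
	struct delayed_work scan_work;
};

/* producer: add an entry, kicking the scanner only on idle -> busy */
static void add_entry(struct foo_priv *priv, struct foo_entry *p)
{
	spin_lock_irq(&priv->lock);
	if (list_empty(&priv->items))
		queue_delayed_work(wq, &priv->scan_work, DELAY);
	list_add(&p->list, &priv->items);
	spin_unlock_irq(&priv->lock);
}

/* consumer: reap, then rearm only while entries remain */
static void scan_work_fn(struct work_struct *work)
{
	struct foo_priv *priv = container_of(work, struct foo_priv,
					     scan_work.work);

	spin_lock_irq(&priv->lock);
	/* ... expire stale entries from priv->items here ... */
	if (!list_empty(&priv->items))
		queue_delayed_work(wq, &priv->scan_work, DELAY);
	spin_unlock_irq(&priv->lock);
}

The net effect is that an idle interface no longer wakes up every IPOIB_CM_RX_DELAY for nothing.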
...@@ -2508,6 +2508,7 @@ config MLX4_CORE ...@@ -2508,6 +2508,7 @@ config MLX4_CORE
config MLX4_DEBUG config MLX4_DEBUG
bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED) bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
depends on MLX4_CORE
default y default y
---help--- ---help---
This option causes debugging code to be compiled into the This option causes debugging code to be compiled into the
......
...@@ -542,8 +542,6 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev) ...@@ -542,8 +542,6 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_priv *priv = mlx4_priv(dev);
int err; int err;
MLX4_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
err = mlx4_init_uar_table(dev); err = mlx4_init_uar_table(dev);
if (err) { if (err) {
mlx4_err(dev, "Failed to initialize " mlx4_err(dev, "Failed to initialize "
......
...@@ -275,7 +275,6 @@ struct mlx4_priv { ...@@ -275,7 +275,6 @@ struct mlx4_priv {
struct mlx4_uar driver_uar; struct mlx4_uar driver_uar;
void __iomem *kar; void __iomem *kar;
MLX4_DECLARE_DOORBELL_LOCK(doorbell_lock)
u32 rev_id; u32 rev_id;
char board_id[MLX4_BOARD_ID_LEN]; char board_id[MLX4_BOARD_ID_LEN];
......
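The doorbell lock moves out of mlx4_core and into mlx4_ib because only the IB driver rings UAR doorbells. The macro family involved is, as in mthca, a conditional lock: on 64-bit a doorbell is a single atomic store and the lock compiles away entirely, while on 32-bit the two 32-bit halves of the write must be serialized. A sketch of that macro pattern — written from memory of the doorbell headers, so treat the exact bodies as illustrative:

#if BITS_PER_LONG == 64
#define DECLARE_DB_LOCK(name)			/* no storage needed */
#define INIT_DB_LOCK(ptr)	do { } while (0)
#define GET_DB_LOCK(ptr)	(NULL)
#else
#define DECLARE_DB_LOCK(name)	spinlock_t name;
#define INIT_DB_LOCK(ptr)	spin_lock_init(ptr)
#define GET_DB_LOCK(ptr)	(ptr)
#endif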

...@@ -85,8 +85,8 @@ __reload_thread(struct pcb_struct *pcb) ...@@ -85,8 +85,8 @@ __reload_thread(struct pcb_struct *pcb)
* +-------------+----------------+--------------+ * +-------------+----------------+--------------+
*/ */
#ifdef CONFIG_SMP
#include <asm/smp.h> #include <asm/smp.h>
#ifdef CONFIG_SMP
#define cpu_last_asn(cpuid) (cpu_data[cpuid].last_asn) #define cpu_last_asn(cpuid) (cpu_data[cpuid].last_asn)
#else #else
extern unsigned long last_asn; extern unsigned long last_asn;
......
...@@ -37,6 +37,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) ...@@ -37,6 +37,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
} }
#define atomic_sub(i, v) atomic_sub_return(i, v) #define atomic_sub(i, v) atomic_sub_return(i, v)
#define atomic_sub_and_test(i,v) (atomic_sub_return(i, v) == 0)
static __inline__ int atomic_inc_return(atomic_t *v) static __inline__ int atomic_inc_return(atomic_t *v)
{ {
......
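atomic_sub_and_test(i, v) subtracts i from *v and returns true exactly when the result is zero, which is the primitive behind last-reference detection. A typical (hypothetical) user:

struct obj {
	atomic_t refcnt;
	/* ... payload ... */
};

static void obj_put(struct obj *o)
{
	/* true only for the thread that drops the final reference */
	if (atomic_sub_and_test(1, &o->refcnt))
		kfree(o);
}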
...@@ -749,9 +749,13 @@ extern unsigned long boot_option_idle_override; ...@@ -749,9 +749,13 @@ extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void); extern void enable_sep_cpu(void);
extern int sysenter_setup(void); extern int sysenter_setup(void);
/* Defined in head.S */
extern struct Xgt_desc_struct early_gdt_descr;
extern void cpu_set_gdt(int); extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void); extern void switch_to_new_gdt(void);
extern void cpu_init(void); extern void cpu_init(void);
extern void init_gdt(int cpu);
extern int force_mwait; extern int force_mwait;
......
...@@ -361,7 +361,9 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) ...@@ -361,7 +361,9 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
long strncpy_from_user(char *dst, const char __user *src, long count); long strncpy_from_user(char *dst, const char __user *src, long count);
long strnlen_user(const char __user *src, long n); long strnlen_user(const char __user *src, long n);
unsigned long clear_user(void __user *to, unsigned long n); unsigned long __clear_user(void __user *to, unsigned long n);
#define clear_user __clear_user
#define strlen_user(str) strnlen_user(str, 32767) #define strlen_user(str) strnlen_user(str, 32767)
......
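The m68k change renames the worker to __clear_user() and aliases the checked entry point to it — the usual arrangement on ports where clear_user() needs no access check beyond what __clear_user() already performs. Callers are unaffected either way; a typical (hypothetical) use, relying on the convention that the return value is the number of bytes left uncleared:

if (clear_user(ubuf, len))	/* zero len bytes at user pointer ubuf */
	return -EFAULT;		/* nonzero return = partial failure */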
...@@ -52,14 +52,9 @@ ...@@ -52,14 +52,9 @@
#endif #endif
/* For assembly routines */ /* For assembly routines */
#ifdef CONFIG_HOTPLUG_CPU
#define __INIT .section ".text","ax"
#define __INITDATA .section ".data","aw"
#else
#define __INIT .section ".init.text","ax" #define __INIT .section ".init.text","ax"
#define __INITDATA .section ".init.data","aw"
#endif
#define __FINIT .previous #define __FINIT .previous
#define __INITDATA .section ".init.data","aw"
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* /*
......
...@@ -27,8 +27,16 @@ struct device; ...@@ -27,8 +27,16 @@ struct device;
void __iowrite32_copy(void __iomem *to, const void *from, size_t count); void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
void __iowrite64_copy(void __iomem *to, const void *from, size_t count); void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
#ifdef CONFIG_MMU
int ioremap_page_range(unsigned long addr, unsigned long end, int ioremap_page_range(unsigned long addr, unsigned long end,
unsigned long phys_addr, pgprot_t prot); unsigned long phys_addr, pgprot_t prot);
#else
static inline int ioremap_page_range(unsigned long addr, unsigned long end,
unsigned long phys_addr, pgprot_t prot)
{
return 0;
}
#endif
/* /*
* Managed iomap interface * Managed iomap interface
......
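The new !CONFIG_MMU branch is the standard static-inline-stub pattern: callers may use ioremap_page_range() unconditionally, and on nommu kernels the call collapses to "return 0" with no #ifdef at the call site. A hypothetical caller:

static int map_device_bar(unsigned long va, unsigned long pa, size_t len)
{
	/* builds unchanged with or without an MMU */
	return ioremap_page_range(va, va + len, pa, PAGE_KERNEL);
}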
...@@ -1288,6 +1288,7 @@ ...@@ -1288,6 +1288,7 @@
#define PCI_DEVICE_ID_VIA_8363_0 0x0305 #define PCI_DEVICE_ID_VIA_8363_0 0x0305
#define PCI_DEVICE_ID_VIA_P4M800CE 0x0314 #define PCI_DEVICE_ID_VIA_P4M800CE 0x0314
#define PCI_DEVICE_ID_VIA_P4M890 0x0327 #define PCI_DEVICE_ID_VIA_P4M890 0x0327
#define PCI_DEVICE_ID_VIA_VT3324 0x0324
#define PCI_DEVICE_ID_VIA_VT3336 0x0336 #define PCI_DEVICE_ID_VIA_VT3336 0x0336
#define PCI_DEVICE_ID_VIA_8371_0 0x0391 #define PCI_DEVICE_ID_VIA_8371_0 0x0391
#define PCI_DEVICE_ID_VIA_8501_0 0x0501 #define PCI_DEVICE_ID_VIA_8501_0 0x0501
......
...@@ -60,7 +60,8 @@ struct kmem_cache { ...@@ -60,7 +60,8 @@ struct kmem_cache {
#define KMALLOC_SHIFT_LOW 3 #define KMALLOC_SHIFT_LOW 3
#ifdef CONFIG_LARGE_ALLOCS #ifdef CONFIG_LARGE_ALLOCS
#define KMALLOC_SHIFT_HIGH 25 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#else #else
#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256 #if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
#define KMALLOC_SHIFT_HIGH 20 #define KMALLOC_SHIFT_HIGH 20
...@@ -87,6 +88,9 @@ static inline int kmalloc_index(int size) ...@@ -87,6 +88,9 @@ static inline int kmalloc_index(int size)
*/ */
WARN_ON_ONCE(size == 0); WARN_ON_ONCE(size == 0);
if (size >= (1 << KMALLOC_SHIFT_HIGH))
return -1;
if (size > 64 && size <= 96) if (size > 64 && size <= 96)
return 1; return 1;
if (size > 128 && size <= 192) if (size > 128 && size <= 192)
......
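Worked through with common i386 numbers, the new bound does what it promises (values assumed here: PAGE_SHIFT = 12, MAX_ORDER = 11):

/*
 * MAX_ORDER + PAGE_SHIFT = 23, and 23 <= 25, so
 * KMALLOC_SHIFT_HIGH = MAX_ORDER + PAGE_SHIFT - 1 = 22.
 *
 * The buddy allocator's largest block is 2^(MAX_ORDER-1) pages
 * = 2^10 * 4 KB = 4 MB = 2^22 bytes, so having kmalloc_index()
 * return -1 for size >= 2^22 keeps kmalloc from promising
 * allocations the page allocator could never back.
 */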
...@@ -74,7 +74,7 @@ static struct clocksource *watchdog; ...@@ -74,7 +74,7 @@ static struct clocksource *watchdog;
static struct timer_list watchdog_timer; static struct timer_list watchdog_timer;
static DEFINE_SPINLOCK(watchdog_lock); static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last; static cycle_t watchdog_last;
static int watchdog_resumed; static unsigned long watchdog_resumed;
/* /*
* Interval: 0.5sec Threshold: 0.0625s * Interval: 0.5sec Threshold: 0.0625s
...@@ -104,9 +104,7 @@ static void clocksource_watchdog(unsigned long data) ...@@ -104,9 +104,7 @@ static void clocksource_watchdog(unsigned long data)
spin_lock(&watchdog_lock); spin_lock(&watchdog_lock);
resumed = watchdog_resumed; resumed = test_and_clear_bit(0, &watchdog_resumed);
if (unlikely(resumed))
watchdog_resumed = 0;
wdnow = watchdog->read(); wdnow = watchdog->read();
wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
...@@ -151,9 +149,7 @@ static void clocksource_watchdog(unsigned long data) ...@@ -151,9 +149,7 @@ static void clocksource_watchdog(unsigned long data)
} }
static void clocksource_resume_watchdog(void) static void clocksource_resume_watchdog(void)
{ {
spin_lock(&watchdog_lock); set_bit(0, &watchdog_resumed);
watchdog_resumed = 1;
spin_unlock(&watchdog_lock);
} }
static void clocksource_check_watchdog(struct clocksource *cs) static void clocksource_check_watchdog(struct clocksource *cs)
......
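The clocksource change swaps a spinlock-protected int for atomic bitops: set_bit() lets the resume path raise the flag from any context without taking watchdog_lock, and test_and_clear_bit() makes the watchdog's read-and-reset a single atomic step, so a resume can neither be observed twice nor lost in between. The pattern in isolation (the flag name is a placeholder; the bitops API requires the flag to live in an unsigned long):

static unsigned long resumed_flag;

void mark_resumed(void)		/* safe from any context, no lock held */
{
	set_bit(0, &resumed_flag);
}

static int check_and_ack_resume(void)
{
	/* atomically fetch and reset: each resume is seen exactly once */
	return test_and_clear_bit(0, &resumed_flag);
}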