Commit 62ae144f authored by Linus Torvalds

Merge branch 'parisc' of master.kernel.org:/pub/scm/linux/kernel/git/kyle/parisc-2.6

parents bcd039b2 2161558f
......@@ -611,8 +611,7 @@ S: USA
N: Randolph Chung
E: tausq@debian.org
D: Linux/PA-RISC hacker
S: Los Altos, CA 94022
S: USA
S: Hong Kong
N: Juan Jose Ciarlante
W: http://juanjox.kernelnotes.org/
......@@ -3405,6 +3404,15 @@ S: Chudenicka 8
S: 10200 Prague 10, Hostivar
S: Czech Republic
N: Thibaut Varene
E: T-Bone@parisc-linux.org
W: http://www.parisc-linux.org/
P: 1024D/B7D2F063 E67C 0D43 A75E 12A5 BB1C FA2F 1E32 C3DA B7D2 F063
D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits
D: Some bits in an ARM port, S1D13XXX FB driver, random patches here and there
D: AD1889 sound driver
S: Paris, France
N: Heikki Vatiainen
E: hessu@cs.tut.fi
D: Co-author of Multi-Protocol Over ATM (MPOA), some LANE hacks
......
......@@ -499,8 +499,12 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
dev = create_parisc_device(mod_path);
if (dev->id.hw_type != HPHW_FAULTY) {
printk("Two devices have hardware path %s. Please file a bug with HP.\n"
"In the meantime, you could try rearranging your cards.\n", parisc_pathname(dev));
printk(KERN_ERR "Two devices have hardware path [%s]. "
"IODC data for second device: "
"%02x%02x%02x%02x%02x%02x\n"
"Rearranging GSC cards sometimes helps\n",
parisc_pathname(dev), iodc_data[0], iodc_data[1],
iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]);
return NULL;
}
......
......@@ -1846,6 +1846,7 @@ sys_clone_wrapper:
ldo -16(%r30),%r29 /* Reference param save area */
#endif
/* WARNING - Clobbers r19 and r21, userspace must save these! */
STREG %r2,PT_GR19(%r1) /* save for child */
STREG %r30,PT_GR21(%r1)
BL sys_clone,%r2
......
......@@ -188,7 +188,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
temp = pa_pdc_cell.cba;
dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path);
if (!dev) {
return PDC_NE_MOD;
return PDC_OK;
}
/* alloc_pa_dev sets dev->hpa */
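Returning PDC_OK here matters because a duplicate or faulty device should just be skipped, not abort the scan. A hedged, hypothetical sketch of the kind of inventory loop that calls pat_query_module() (simplified, not the actual arch/parisc inventory code; pcell_loc as in the hunk above):

        int mod_index;
        long status;

        for (mod_index = 0; ; mod_index++) {
                status = pat_query_module(pcell_loc, mod_index);
                if (status != PDC_OK)
                        break;  /* any error ends the whole module scan */
        }

With the old PDC_NE_MOD return, one bad hardware path would have stopped enumeration of every module after it.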
......
This diff is collapsed.
......@@ -30,6 +30,9 @@
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/smp.h>
#undef PARISC_IRQ_CR16_COUNTS
......@@ -43,26 +46,34 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
*/
static volatile unsigned long cpu_eiem = 0;
static void cpu_set_eiem(void *info)
{
set_eiem((unsigned long) info);
}
static inline void cpu_disable_irq(unsigned int irq)
static void cpu_disable_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
cpu_eiem &= ~eirr_bit;
on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
/* Do nothing on the other CPUs. If they get this interrupt,
* The & cpu_eiem in the do_cpu_irq_mask() ensures they won't
* handle it, and the set_eiem() at the bottom will ensure it
* then gets disabled */
}
static void cpu_enable_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */
cpu_eiem |= eirr_bit;
on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
/* FIXME: while our interrupts aren't nested, we cannot reset
* the eiem mask if we're already in an interrupt. Once we
* implement nested interrupts, this can go away
*/
if (!in_interrupt())
set_eiem(cpu_eiem);
/* This is just a simple NOP IPI. But what it does is cause
* all the other CPUs to do a set_eiem(cpu_eiem) at the end
* of the interrupt handler */
smp_send_all_nop();
}
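Taken together, these two hunks replace the eager on_each_cpu(cpu_set_eiem, ...) broadcast with a lazy scheme. A minimal sketch of the intended sequence, assuming the pieces changed elsewhere in this commit (do_cpu_irq_mask() masking EIRR with cpu_eiem and restoring set_eiem(cpu_eiem) on exit):

        /* CPU A disables an IRQ: only the shared mask changes. */
        cpu_eiem &= ~EIEM_MASK(irq);

        /*
         * Another CPU may still take that external interrupt once, but
         * do_cpu_irq_mask() ANDs the EIRR bits with cpu_eiem, so the stale
         * bit is ignored, and the final set_eiem(cpu_eiem) there reloads
         * the narrower mask on that CPU.
         */

        /* CPU A enables an IRQ: update the mask, then nudge everyone. */
        cpu_eiem |= EIEM_MASK(irq);
        smp_send_all_nop();     /* every other CPU reloads cpu_eiem on IRQ exit */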
static unsigned int cpu_startup_irq(unsigned int irq)
......@@ -74,6 +85,35 @@ static unsigned int cpu_startup_irq(unsigned int irq)
void no_ack_irq(unsigned int irq) { }
void no_end_irq(unsigned int irq) { }
#ifdef CONFIG_SMP
int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
{
int cpu_dest;
/* timer and ipi have to always be received on all CPUs */
if (irq == TIMER_IRQ || irq == IPI_IRQ) {
/* Bad linux design decision. The mask has already
* been set; we must reset it */
irq_affinity[irq] = CPU_MASK_ALL;
return -EINVAL;
}
/* whatever mask they set, we just allow one CPU */
cpu_dest = first_cpu(*dest);
*dest = cpumask_of_cpu(cpu_dest);
return 0;
}
static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
{
if (cpu_check_affinity(irq, &dest))
return;
irq_affinity[irq] = dest;
}
#endif
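The "Bad linux design decision" comment refers to the generic /proc/irq/<n>/smp_affinity write path of this kernel generation, which stores the new mask before consulting the chip. Roughly, as a hedged sketch rather than the actual generic source:

        /* generic proc handler, simplified for context */
        irq_affinity[irq] = new_mask;                           /* stored first... */
        irq_desc[irq].handler->set_affinity(irq, new_mask);     /* ...chip hook second */

So a hook that wants to veto the change for TIMER_IRQ or IPI_IRQ has to put irq_affinity[irq] back itself, which is exactly what cpu_check_affinity() does before returning -EINVAL.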
static struct hw_interrupt_type cpu_interrupt_type = {
.typename = "CPU",
.startup = cpu_startup_irq,
......@@ -82,7 +122,9 @@ static struct hw_interrupt_type cpu_interrupt_type = {
.disable = cpu_disable_irq,
.ack = no_ack_irq,
.end = no_end_irq,
// .set_affinity = cpu_set_affinity_irq,
#ifdef CONFIG_SMP
.set_affinity = cpu_set_affinity_irq,
#endif
};
int show_interrupts(struct seq_file *p, void *v)
......@@ -219,6 +261,17 @@ int txn_alloc_irq(unsigned int bits_wide)
return -1;
}
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
irq_affinity[irq] = cpumask_of_cpu(cpu);
#endif
return cpu_data[cpu].txn_addr;
}
unsigned long txn_alloc_addr(unsigned int virt_irq)
{
static int next_cpu = -1;
......@@ -233,7 +286,7 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
if (next_cpu >= NR_CPUS)
next_cpu = 0; /* nothing else, assign monarch */
return cpu_data[next_cpu].txn_addr;
return txn_affinity_addr(virt_irq, next_cpu);
}
......@@ -250,10 +303,11 @@ void do_cpu_irq_mask(struct pt_regs *regs)
irq_enter();
/*
* Only allow interrupt processing to be interrupted by the
* timer tick
* Don't allow TIMER or IPI nested interrupts.
* Allowing any single interrupt to nest can lead to that CPU
* handling interrupts with all enabled interrupts unmasked.
*/
set_eiem(EIEM_MASK(TIMER_IRQ));
set_eiem(0UL);
/* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
* 2) We loop here on EIRR contents in order to avoid
......@@ -267,23 +321,41 @@ void do_cpu_irq_mask(struct pt_regs *regs)
if (!eirr_val)
break;
if (eirr_val & EIEM_MASK(TIMER_IRQ))
set_eiem(0);
mtctl(eirr_val, 23); /* reset bits we are going to process */
/* Work our way from MSb to LSb...same order we alloc EIRs */
for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
#ifdef CONFIG_SMP
cpumask_t dest = irq_affinity[irq];
#endif
if (!(bit & eirr_val))
continue;
/* clear bit in mask - can exit loop sooner */
eirr_val &= ~bit;
#ifdef CONFIG_SMP
/* FIXME: because generic set affinity mucks
* with the affinity before sending it to us
* we can get the situation where the affinity is
* wrong for our CPU type interrupts */
if (irq != TIMER_IRQ && irq != IPI_IRQ &&
!cpu_isset(smp_processor_id(), dest)) {
int cpu = first_cpu(dest);
printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
irq, smp_processor_id(), cpu);
gsc_writel(irq + CPU_IRQ_BASE,
cpu_data[cpu].hpa);
continue;
}
#endif
__do_IRQ(irq, regs);
}
}
set_eiem(cpu_eiem);
set_eiem(cpu_eiem); /* restore original mask */
irq_exit();
}
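The SMP redirect above leans on how external interrupts are delivered on PA-RISC: writing an interrupt number to a CPU's HPA sets the matching EIR bit on that CPU. A minimal sketch of just that step (the helper name is hypothetical; the diff open-codes it):

        /* re-post an IRQ that arrived on a CPU outside its affinity mask */
        static void forward_cpu_irq(unsigned int irq, int cpu)
        {
                gsc_writel(irq + CPU_IRQ_BASE, cpu_data[cpu].hpa);
        }

The interrupt is then handled on the CPU named in irq_affinity[], at the cost of one extra pass through do_cpu_irq_mask() on the wrong CPU.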
......@@ -291,12 +363,14 @@ void do_cpu_irq_mask(struct pt_regs *regs)
static struct irqaction timer_action = {
.handler = timer_interrupt,
.name = "timer",
.flags = SA_INTERRUPT,
};
#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
.handler = ipi_interrupt,
.name = "IPI",
.flags = SA_INTERRUPT,
};
#endif
......
......@@ -196,8 +196,7 @@ static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos);
static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
......@@ -438,48 +437,56 @@ static void perf_patch_images(void)
* must be running on the processor that you wish to change.
*/
static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long error_start;
uint32_t raddr[4];
uint32_t raddr[4];
int error = 0;
lock_kernel();
switch (cmd) {
case PA_PERF_ON:
/* Start the counters */
perf_start_counters();
return 0;
break;
case PA_PERF_OFF:
error_start = perf_stop_counters(raddr);
if (error_start != 0) {
printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
return -EFAULT;
error = -EFAULT;
break;
}
/* copy out the Counters */
if (copy_to_user((void __user *)arg, raddr,
sizeof (raddr)) != 0) {
return -EFAULT;
error = -EFAULT;
break;
}
return 0;
break;
case PA_PERF_VERSION:
/* Return the version # */
return put_user(PERF_VERSION, (int *)arg);
error = put_user(PERF_VERSION, (int *)arg);
break;
default:
break;
error = -ENOTTY;
}
return -ENOTTY;
unlock_kernel();
return error;
}
static struct file_operations perf_fops = {
.llseek = no_llseek,
.read = perf_read,
.write = perf_write,
.ioctl = perf_ioctl,
.unlocked_ioctl = perf_ioctl,
.compat_ioctl = perf_ioctl,
.open = perf_open,
.release = perf_release
};
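This is the usual shape for moving off the locked ->ioctl entry point: unlocked_ioctl is called without the Big Kernel Lock, so the handler takes it explicitly and funnels every exit through one return; and because the data exchanged here is 32-bit clean, the same function can double as compat_ioctl. A hedged, generic sketch of the pattern (the foo_* names and FOO_CMD are hypothetical):

        static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        {
                long error = 0;

                lock_kernel();
                switch (cmd) {
                case FOO_CMD:                   /* hypothetical command */
                        error = foo_do_cmd(file, arg);
                        break;
                default:
                        error = -ENOTTY;
                }
                unlock_kernel();
                return error;
        }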
......
......@@ -264,6 +264,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
* sigkill. perhaps it should be put in the status
* that it wants to exit.
*/
ret = 0;
DBG("sys_ptrace(KILL)\n");
if (child->exit_state == EXIT_ZOMBIE) /* already dead */
goto out_tsk;
......@@ -344,11 +345,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_GETEVENTMSG:
ret = put_user(child->ptrace_message, (unsigned int __user *) data);
goto out;
goto out_tsk;
default:
ret = ptrace_request(child, request, addr, data);
goto out;
goto out_tsk;
}
out_wake_notrap:
......
......@@ -296,7 +296,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
struct rt_sigframe __user *frame;
unsigned long rp, usp;
unsigned long haddr, sigframe_size;
struct siginfo si;
int err = 0;
#ifdef __LP64__
compat_int_t compat_val;
......
......@@ -181,12 +181,19 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
while (ops) {
unsigned long which = ffz(~ops);
ops &= ~(1 << which);
switch (which) {
case IPI_NOP:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_NOP\n",this_cpu);
#endif /* kDEBUG */
break;
case IPI_RESCHEDULE:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_RESCHEDULE);
/*
* Reschedule callback. Everything to be
* done is done by the interrupt return path.
......@@ -197,7 +204,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_CALL_FUNC);
{
volatile struct smp_call_struct *data;
void (*func)(void *info);
......@@ -231,7 +237,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_CPU_START);
#ifdef ENTRY_SYS_CPUS
p->state = STATE_RUNNING;
#endif
......@@ -241,7 +246,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_CPU_STOP);
#ifdef ENTRY_SYS_CPUS
#else
halt_processor();
......@@ -252,13 +256,11 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_CPU_TEST);
break;
default:
printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
this_cpu, which);
ops &= ~(1 << which);
return IRQ_NONE;
} /* Switch */
} /* while (ops) */
......@@ -312,6 +314,12 @@ smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); }
void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
void
smp_send_all_nop(void)
{
send_IPI_allbutself(IPI_NOP);
}
/**
* Run a function on all other CPUs.
......@@ -338,6 +346,10 @@ smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
/* can also deadlock if IPIs are disabled */
WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);
data.func = func;
data.info = info;
......
......@@ -164,7 +164,7 @@ linux_gateway_entry:
#endif
STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */
STREG %r20, TASK_PT_GR20(%r1)
STREG %r20, TASK_PT_GR20(%r1) /* Syscall number */
STREG %r21, TASK_PT_GR21(%r1)
STREG %r22, TASK_PT_GR22(%r1)
STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */
......@@ -527,6 +527,7 @@ lws_compare_and_swap:
We *must* giveup this call and fail.
*/
ldw 4(%sr2,%r20), %r28 /* Load thread register */
/* WARNING: If cr27 cycles to the same value we have problems */
mfctl %cr27, %r21 /* Get current thread register */
cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */
b lws_exit /* Return error! */
......
......@@ -625,7 +625,7 @@ config BLK_DEV_NS87415
tristate "NS87415 chipset support"
help
This driver adds detection and support for the NS87415 chip
(used in SPARC64, among others).
(used mainly on SPARC64 and PA-RISC machines).
Please read the comments at the top of <file:drivers/ide/pci/ns87415.c>.
......
......@@ -110,7 +110,7 @@ config HISAX_16_3
config HISAX_TELESPCI
bool "Teles PCI"
depends on PCI && (BROKEN || !(SPARC64 || PPC))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
help
This enables HiSax support for the Teles PCI.
See <file:Documentation/isdn/README.HiSax> on how to configure it.
......@@ -238,7 +238,7 @@ config HISAX_MIC
config HISAX_NETJET
bool "NETjet card"
depends on PCI && (BROKEN || !(SPARC64 || PPC))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
help
This enables HiSax support for the NetJet from Traverse
Technologies.
......@@ -249,7 +249,7 @@ config HISAX_NETJET
config HISAX_NETJET_U
bool "NETspider U card"
depends on PCI && (BROKEN || !(SPARC64 || PPC))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
help
This enables HiSax support for the Netspider U interface ISDN card
from Traverse Technologies.
......@@ -317,7 +317,7 @@ config HISAX_GAZEL
config HISAX_HFC_PCI
bool "HFC PCI-Bus cards"
depends on PCI && (BROKEN || !(SPARC64 || PPC))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
help
This enables HiSax support for the HFC-S PCI 2BDS0 based cards.
......@@ -344,14 +344,14 @@ config HISAX_HFC_SX
config HISAX_ENTERNOW_PCI
bool "Formula-n enter:now PCI card"
depends on PCI && (BROKEN || !(SPARC64 || PPC))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
help
This enables HiSax support for the Formula-n enter:now PCI
ISDN card.
config HISAX_AMD7930
bool "Am7930 (EXPERIMENTAL)"
depends on EXPERIMENTAL && (SPARC32 || SPARC64)
depends on EXPERIMENTAL && SPARC
help
This enables HiSax support for the AMD7930 chips on some SPARCs.
This code is not finished yet.
......
......@@ -3,7 +3,7 @@
#
config ISDN_DRV_PCBIT
tristate "PCBIT-D support"
depends on ISDN_I4L && ISA && (BROKEN || !PPC)
depends on ISDN_I4L && ISA && (BROKEN || X86)
help
This enables support for the PCBIT ISDN-card. This card is
manufactured in Portugal by Octal. For running this card,
......
......@@ -700,6 +700,28 @@ static unsigned int iosapic_startup_irq(unsigned int irq)
return 0;
}
#ifdef CONFIG_SMP
static void iosapic_set_affinity_irq(unsigned int irq, cpumask_t dest)
{
struct vector_info *vi = iosapic_get_vector(irq);
u32 d0, d1, dummy_d0;
unsigned long flags;
if (cpu_check_affinity(irq, &dest))
return;
vi->txn_addr = txn_affinity_addr(irq, first_cpu(dest));
spin_lock_irqsave(&iosapic_lock, flags);
/* d1 contains the destination CPU, so only want to set that
* entry */
iosapic_rd_irt_entry(vi, &d0, &d1);
iosapic_set_irt_data(vi, &dummy_d0, &d1);
iosapic_wr_irt_entry(vi, d0, d1);
spin_unlock_irqrestore(&iosapic_lock, flags);
}
#endif
static struct hw_interrupt_type iosapic_interrupt_type = {
.typename = "IO-SAPIC-level",
.startup = iosapic_startup_irq,
......@@ -708,7 +730,9 @@ static struct hw_interrupt_type iosapic_interrupt_type = {
.disable = iosapic_disable_irq,
.ack = no_ack_irq,
.end = iosapic_end_irq,
// .set_affinity = iosapic_set_affinity_irq,
#ifdef CONFIG_SMP
.set_affinity = iosapic_set_affinity_irq,
#endif
};
int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
......
......@@ -24,6 +24,9 @@
* Major changes to get basic interrupt infrastructure working to
* hopefully be able to support all SuperIO devices. Currently
* works with serial. -- John Marvin <jsm@fc.hp.com>
*
* Converted superio_init() to be a PCI_FIXUP_FINAL callee.
* -- Kyle McMartin <kyle@parisc-linux.org>
*/
......@@ -141,10 +144,10 @@ superio_interrupt(int parent_irq, void *devp, struct pt_regs *regs)
}
/* Initialize Super I/O device */
static void __devinit
superio_init(struct superio_device *sio)
static void
superio_init(struct pci_dev *pcidev)
{
struct superio_device *sio = &sio_dev;
struct pci_dev *pdev = sio->lio_pdev;
u16 word;
......@@ -160,8 +163,8 @@ superio_init(struct superio_device *sio)
/* ...then properly fixup the USB to point at suckyio PIC */
sio->usb_pdev->irq = superio_fixup_irq(sio->usb_pdev);
printk (KERN_INFO "SuperIO: Found NS87560 Legacy I/O device at %s (IRQ %i) \n",
pci_name(pdev),pdev->irq);
printk(KERN_INFO "SuperIO: Found NS87560 Legacy I/O device at %s (IRQ %i) \n",
pci_name(pdev), pdev->irq);
pci_read_config_dword (pdev, SIO_SP1BAR, &sio->sp1_base);
sio->sp1_base &= ~1;
......@@ -274,7 +277,7 @@ superio_init(struct superio_device *sio)
sio->suckyio_irq_enabled = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init);
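Registering superio_init() as a final PCI fixup means it runs when the matching device is added, before any driver's probe routine; that ordering is what lets superio_probe() later assert that sio->suckyio_irq_enabled is already set. A minimal sketch of the fixup pattern, with a placeholder hook name:

        static void quirk_example_final(struct pci_dev *pdev)
        {
                /* one-shot setup for this device, before drivers bind */
        }
        DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO,
                                quirk_example_final);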
static void superio_disable_irq(unsigned int irq)
{
......@@ -452,8 +455,10 @@ static void superio_fixup_pci(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415, superio_fixup_pci);
static int __devinit superio_probe(struct pci_dev *dev, const struct pci_device_id *id)
static int __devinit
superio_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct superio_device *sio = &sio_dev;
/*
** superio_probe(00:0e.0) ven 0x100b dev 0x2 sv 0x0 sd 0x0 class 0x1018a
......@@ -466,7 +471,8 @@ static int __devinit superio_probe(struct pci_dev *dev, const struct pci_device_
dev->subsystem_vendor, dev->subsystem_device,
dev->class);
superio_init(&sio_dev);
if (!sio->suckyio_irq_enabled)
BUG(); /* Enabled by PCI_FIXUP_FINAL */
if (dev->device == PCI_DEVICE_ID_NS_87560_LIO) { /* Function 1 */
superio_parport_init();
......@@ -481,19 +487,21 @@ static int __devinit superio_probe(struct pci_dev *dev, const struct pci_device_
DBG_INIT("superio_probe: WTF? Fire Extinguisher?\n");
}
/* Let appropriate other driver claim this device. */
/* Let appropriate other driver claim this device. */
return -ENODEV;
}
static struct pci_device_id superio_tbl[] = {
{ PCI_VENDOR_ID_NS, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO) },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_USB) },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415) },
{ 0, }
};
static struct pci_driver superio_driver = {
.name = "SuperIO",
.id_table = superio_tbl,
.probe = superio_probe,
.name = "SuperIO",
.id_table = superio_tbl,
.probe = superio_probe,
};
static int __init superio_modinit(void)
......@@ -506,6 +514,5 @@ static void __exit superio_exit(void)
pci_unregister_driver(&superio_driver);
}
module_init(superio_modinit);
module_exit(superio_exit);
......@@ -507,7 +507,7 @@ config SERIAL_SUNSU_CONSOLE
config SERIAL_MUX
tristate "Serial MUX support"
depends on PARISC
depends on GSC
select SERIAL_CORE
default y
---help---
......
......@@ -65,8 +65,8 @@ static struct uart_driver mux_driver = {
static struct timer_list mux_timer;
#define UART_PUT_CHAR(p, c) __raw_writel((c), (unsigned long)(p)->membase + IO_DATA_REG_OFFSET)
#define UART_GET_FIFO_CNT(p) __raw_readl((unsigned long)(p)->membase + IO_DCOUNT_REG_OFFSET)
#define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + IO_DATA_REG_OFFSET)
#define UART_GET_FIFO_CNT(p) __raw_readl((p)->membase + IO_DCOUNT_REG_OFFSET)
#define GET_MUX_PORTS(iodc_data) ((((iodc_data)[4] & 0xf0) >> 4) * 8) + 8
/**
......@@ -79,10 +79,7 @@ static struct timer_list mux_timer;
*/
static unsigned int mux_tx_empty(struct uart_port *port)
{
unsigned int cnt = __raw_readl((unsigned long)port->membase
+ IO_DCOUNT_REG_OFFSET);
return cnt ? 0 : TIOCSER_TEMT;
return UART_GET_FIFO_CNT(port) ? 0 : TIOCSER_TEMT;
}
/**
......@@ -218,8 +215,7 @@ static void mux_read(struct uart_port *port)
__u32 start_count = port->icount.rx;
while(1) {
data = __raw_readl((unsigned long)port->membase
+ IO_DATA_REG_OFFSET);
data = __raw_readl(port->membase + IO_DATA_REG_OFFSET);
if (MUX_STATUS(data))
continue;
......@@ -481,6 +477,13 @@ static int __init mux_probe(struct parisc_device *dev)
port->ops = &mux_pops;
port->flags = UPF_BOOT_AUTOCONF;
port->line = port_cnt;
/* The port->timeout needs to match what is present in
* uart_wait_until_sent in serial_core.c. Otherwise
* the time spent in msleep_interruptable will be very
* long, causing the appearance of a console hang.
*/
port->timeout = HZ / 50;
spin_lock_init(&port->lock);
status = uart_add_one_port(&mux_driver, port);
BUG_ON(status);
......
......@@ -8,6 +8,7 @@
#define _ASM_PARISC_IRQ_H
#include <linux/config.h>
#include <linux/cpumask.h>
#include <asm/types.h>
#define NO_IRQ (-1)
......@@ -49,10 +50,10 @@ extern int txn_alloc_irq(unsigned int nbits);
extern int txn_claim_irq(int);
extern unsigned int txn_alloc_data(unsigned int);
extern unsigned long txn_alloc_addr(unsigned int);
extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
/* soft power switch support (power.c) */
extern struct tasklet_struct power_tasklet;
......
......@@ -29,6 +29,7 @@ extern cpumask_t cpu_online_map;
#define cpu_logical_map(cpu) (cpu)
extern void smp_send_reschedule(int cpu);
extern void smp_send_all_nop(void);
#endif /* !ASSEMBLY */
......@@ -53,7 +54,11 @@ extern unsigned long cpu_present_mask;
#define raw_smp_processor_id() (current_thread_info()->cpu)
#endif /* CONFIG_SMP */
#else /* CONFIG_SMP */
static inline void smp_send_all_nop(void) { return; }
#endif
#define NO_PROC_ID 0xFF /* No processor magic marker */
#define ANY_PROC_ID 0xFF /* Any processor magic marker */
......
......@@ -11,18 +11,25 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x)
return *a == 0;
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
#define __raw_spin_unlock_wait(x) \
do { cpu_relax(); } while (__raw_spin_is_locked(x))
static inline void __raw_spin_lock(raw_spinlock_t *x)
static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;
mb();
a = __ldcw_align(x);
while (__ldcw(a) == 0)
while (*a == 0);
while (*a == 0)
if (flags & PSW_SM_I) {
local_irq_enable();
cpu_relax();
local_irq_disable();
} else
cpu_relax();
mb();
}
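Passing the caller's saved flags down is what makes the new wait loop useful: with spin_lock_irqsave(), the flags captured before interrupts were disabled still carry the old PSW I-bit, so a CPU that was interruptible before contending may briefly re-enable interrupts while it spins (keeping the timer and IPIs alive), while one that was not stays masked. A hedged sketch of that caller side (the lock name is illustrative):

        raw_spinlock_t some_lock;                 /* illustrative */
        unsigned long flags;

        local_irq_save(flags);                    /* records the old PSW_SM_I state */
        __raw_spin_lock_flags(&some_lock, flags); /* may re-enable IRQs while waiting */
        /* ... critical section ... */
        __raw_spin_unlock(&some_lock);
        local_irq_restore(flags);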
......@@ -60,26 +67,20 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
__raw_spin_lock(&rw->lock);
rw->counter++;
__raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
__raw_spin_lock(&rw->lock);
rw->counter--;
__raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* write_lock is less trivial. We optimistically grab the lock and check
......
......@@ -12,21 +12,15 @@
* N class systems, only one PxTLB inter processor broadcast can be
* active at any one time on the Merced bus. This tlb purge
* synchronisation is fairly lightweight and harmless so we activate
* it on all SMP systems not just the N class. */
#ifdef CONFIG_SMP
* it on all SMP systems not just the N class. We also need to have
* preemption disabled on uniprocessor machines, and spin_lock does that
* nicely.
*/
extern spinlock_t pa_tlb_lock;
#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
#else
#define purge_tlb_start(x) do { } while(0)
#define purge_tlb_end(x) do { } while (0)
#endif
extern void flush_tlb_all(void);
/*
......@@ -88,7 +82,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
flush_tlb_all();
else {
preempt_disable();
mtsp(vma->vm_mm->context,1);
purge_tlb_start();
if (split_tlb) {
......@@ -102,7 +95,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
pdtlb(start);
start += PAGE_SIZE;
}
preempt_enable();
}
purge_tlb_end();
}
......
......@@ -940,7 +940,9 @@ unsigned long max_sane_readahead(unsigned long nr);
/* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
#ifdef CONFIG_IA64
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
......
......@@ -1501,7 +1501,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
#ifdef CONFIG_STACK_GROWSUP
#ifndef CONFIG_IA64
static inline
#endif
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
......