Commit d06e7a56, authored Jul 05, 2005 by Linus Torvalds

Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6

Parents: 346fced8 864ae180

Showing 12 changed files with 349 additions and 506 deletions
arch/sparc64/Kconfig              +18   -0
arch/sparc64/kernel/entry.S       +3    -18
arch/sparc64/kernel/irq.c         +197  -384
arch/sparc64/kernel/pci_psycho.c  +1    -2
arch/sparc64/kernel/pci_sabre.c   +26   -20
arch/sparc64/kernel/pci_schizo.c  +60   -18
arch/sparc64/kernel/time.c        +1    -1
drivers/sbus/char/bpp.c           +2    -18
include/asm-sparc64/irq.h         +20   -29
include/asm-sparc64/pbm.h         +3    -0
include/asm-sparc64/signal.h      +0    -15
include/linux/compat_ioctl.h      +18   -1
arch/sparc64/Kconfig
...
...
@@ -444,6 +444,24 @@ config PRINTER
	  If you have more than 8 printers, you need to increase the LP_NO
	  macro in lp.c and the PARPORT_MAX macro in parport.h.

config PPDEV
	tristate "Support for user-space parallel port device drivers"
	depends on PARPORT
	---help---
	  Saying Y to this adds support for /dev/parport device nodes.  This
	  is needed for programs that want portable access to the parallel
	  port, for instance deviceid (which displays Plug-and-Play device
	  IDs).

	  This is the parallel port equivalent of SCSI generic support (sg).
	  It is safe to say N to this -- it is not needed for normal printing
	  or parallel port CD-ROM/disk support.

	  To compile this driver as a module, choose M here: the
	  module will be called ppdev.

	  If unsure, say N.

config ENVCTRL
	tristate "SUNW, envctrl support"
	depends on PCI
...
...
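The PPDEV help text above describes user-space parallel-port drivers that talk to /dev/parport device nodes, and the compat_ioctl.h hunk at the end of this commit whitelists the matching ioctls for 32-bit callers. As a rough illustration only (not part of this commit; the device path and error handling are assumptions), a minimal user of that interface could look like this:

/* Sketch of a ppdev user: claim the port, read the data register, release.
 * PPCLAIM, PPRDATA and PPRELEASE come from <linux/ppdev.h>, the same ioctls
 * added to the 32-bit compat table below.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ppdev.h>

int main(void)
{
	unsigned char byte;
	int fd = open("/dev/parport0", O_RDWR);	/* node provided when CONFIG_PPDEV is enabled */

	if (fd < 0)
		return 1;
	if (ioctl(fd, PPCLAIM) == 0) {			/* claim the port before any I/O */
		if (ioctl(fd, PPRDATA, &byte) == 0)	/* read the parallel port data register */
			printf("data: 0x%02x\n", byte);
		ioctl(fd, PPRELEASE);			/* release so other users can claim it */
	}
	close(fd);
	return 0;
}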
arch/sparc64/kernel/entry.S
...
...
@@ -553,13 +553,11 @@ do_ivec:
	sllx		%g3, 5, %g3
	or		%g2, %lo(ivector_table), %g2
	add		%g2, %g3, %g3
	ldx		[%g3 + 0x08], %g2	/* irq_info */
	ldub		[%g3 + 0x04], %g4	/* pil */
	brz,pn		%g2, do_ivec_spurious
	 mov		1, %g2
	sllx		%g2, %g4, %g2
	sllx		%g4, 2, %g4
	lduw		[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
	stw		%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
	stw		%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
...
@@ -567,9 +565,9 @@ do_ivec:
	retry
do_ivec_xcall:
	mov		0x50, %g1
	ldxa		[%g1 + %g0] ASI_INTR_R, %g1
	srl		%g3, 0, %g3
	mov		0x60, %g7
	ldxa		[%g7 + %g0] ASI_INTR_R, %g7
	stxa		%g0, [%g0] ASI_INTR_RECEIVE
...
@@ -581,19 +579,6 @@ do_ivec_xcall:
1:	jmpl		%g3, %g0
	 nop
do_ivec_spurious:
	stw		%g3, [%g6 + 0x00]	/* irq_work(cpu, 0) = bucket */
	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_IG | PSTATE_AG, %pstate
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	call		catch_disabled_ivec
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 clr		%l6

	.globl		save_alternate_globals
save_alternate_globals: /* %o0 = save_area */
	rdpr		%pstate, %o5
...
...
arch/sparc64/kernel/irq.c
...
...
@@ -71,31 +71,7 @@ struct irq_work_struct {
struct irq_work_struct __irq_work[NR_CPUS];
#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])

#ifdef CONFIG_PCI
/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
 * for devices behind busses other than APB on Sabre systems.
 *
 * Currently these physical addresses are just config space accesses
 * to the command register for that device.
 */
unsigned long pci_dma_wsync;
unsigned long dma_sync_reg_table[256];
unsigned char dma_sync_reg_table_entry = 0;
#endif

/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count;

/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};
static struct irqaction *irq_action[NR_IRQS+1];

/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
...
...
@@ -241,17 +217,22 @@ void disable_irq(unsigned int irq)
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct irq_desc pil0_dummy_desc;

static struct ino_bucket pil0_dummy_bucket = {
	0,	/* irq_chain */
	0,	/* pil */
	0,	/* pending */
	0,	/* flags */
	0,	/* __unused */
	NULL,	/* irq_info */
	0UL,	/* iclr */
	0UL,	/* imap */
	.irq_info = &pil0_dummy_desc,
};

static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
			    unsigned long iclr, unsigned long imap,
			    struct ino_bucket *bucket)
{
	prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> "
		    "(%d:%d:%016lx:%016lx), halting...\n",
		    ino, bucket->pil, bucket->iclr, bucket->imap,
		    pil, inofixup, iclr, imap);
	prom_halt();
}

unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
...
...
@@ -280,28 +261,35 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket = &ivector_table[ino];
	if ((bucket->flags & IBF_ACTIVE) ||
	    (bucket->irq_info != NULL)) {
		/* This is a gross fatal error if it happens here. */
		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
			    bucket->pil, bucket->iclr, bucket->imap);
		prom_printf("IRQ: Cannot continue, halting...\n");
	if (bucket->flags & IBF_ACTIVE)
		build_irq_error("IRQ: Trying to build active INO bucket.\n",
				ino, pil, inofixup, iclr, imap, bucket);

	if (bucket->irq_info) {
		if (bucket->imap != imap ||
		    bucket->iclr != iclr)
			build_irq_error("IRQ: Trying to reinit INO bucket.\n",
					ino, pil, inofixup, iclr, imap, bucket);
		goto out;
	}

	bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
	if (!bucket->irq_info) {
		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
		prom_halt();
	}
	memset(bucket->irq_info, 0, sizeof(struct irq_desc));

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;

	bucket->irq_info = NULL;

out:
	return __irq(bucket);
}
...
...
@@ -319,27 +307,66 @@ static void atomic_bucket_insert(struct ino_bucket *bucket)
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}

static int check_irq_sharing(int pil, unsigned long irqflags)
{
	struct irqaction *action, *tmp;

	action = *(irq_action + pil);
	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		} else {
			return -EBUSY;
		}
	}
	return 0;
}

static void append_irq_action(int pil, struct irqaction *action)
{
	struct irqaction **pp = irq_action + pil;

	while (*pp)
		pp = &((*pp)->next);
	*pp = action;
}

static struct irqaction *get_action_slot(struct ino_bucket *bucket)
{
	struct irq_desc *desc = bucket->irq_info;
	int max_irq, i;

	max_irq = 1;
	if (bucket->flags & IBF_PCI)
		max_irq = MAX_IRQ_DESC_ACTION;
	for (i = 0; i < max_irq; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (desc->action_active_mask & mask)
			continue;

		desc->action_active_mask |= mask;
		return p;
	}
	return NULL;
}

int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action, *tmp = NULL;
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	if (!handler)
	if (unlikely(!handler))
		return -EINVAL;

	if (unlikely(!bucket->irq_info))
		return -ENODEV;

	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
		/*
		 * This function might sleep, we want to call it first,
...
...
@@ -356,93 +383,20 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(bucket->pil + irq_action);
	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		else {
	if (check_irq_sharing(bucket->pil, irqflags)) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}
			action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", irq, name);
	}
	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);
	action = get_action_slot(bucket);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}

	if (bucket == &pil0_dummy_bucket) {
		bucket->irq_info = action;
		bucket->flags |= IBF_ACTIVE;
	} else {
		if ((bucket->flags & IBF_ACTIVE) != 0) {
			void *orig = bucket->irq_info;
			void **vector = NULL;

			if ((bucket->flags & IBF_PCI) == 0) {
				printk("IRQ: Trying to share non-PCI bucket.\n");
				goto free_and_ebusy;
			}
			if ((bucket->flags & IBF_MULTI) == 0) {
				vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
				if (vector == NULL)
					goto free_and_enomem;

				/* We might have slept. */
				if ((bucket->flags & IBF_MULTI) != 0) {
					int ent;

					kfree(vector);
					vector = (void **) bucket->irq_info;
					for (ent = 0; ent < 4; ent++) {
						if (vector[ent] == NULL) {
							vector[ent] = action;
							break;
						}
					}
					if (ent == 4)
						goto free_and_ebusy;
				} else {
					vector[0] = orig;
					vector[1] = action;
					vector[2] = NULL;
					vector[3] = NULL;
					bucket->irq_info = vector;
					bucket->flags |= IBF_MULTI;
				}
			} else {
				int ent;

				vector = (void **) orig;
				for (ent = 0; ent < 4; ent++) {
					if (vector[ent] == NULL) {
						vector[ent] = action;
						break;
					}
				}
				if (ent == 4)
					goto free_and_ebusy;
			}
		} else {
			bucket->irq_info = action;
			bucket->flags |= IBF_ACTIVE;
		}

	pending = 0;
	if (bucket != &pil0_dummy_bucket) {
		pending = bucket->pending;
		if (pending)
			bucket->pending = 0;
...
...
@@ -456,10 +410,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	if (tmp)
		tmp->next = action;
	else
		*(bucket->pil + irq_action) = action;
	append_irq_action(bucket->pil, action);

	enable_irq(irq);
...
...
@@ -468,124 +419,81 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
			atomic_bucket_insert(bucket);
			set_softint(1 << bucket->pil);
		}
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
	if (bucket != &pil0_dummy_bucket)
		register_irq_proc(__irq_ino(irq));

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;

free_and_ebusy:
	kfree(action);
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return -EBUSY;

free_and_enomem:
	kfree(action);
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return -ENOMEM;
}

EXPORT_SYMBOL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct irqaction *tmp = NULL;
	unsigned long flags;
	struct ino_bucket *bucket = __bucket(irq), *bp;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return;
	}
	struct ino_bucket *bucket = __bucket(irq);
	struct irqaction *action, **pp;

	spin_lock_irqsave(&irq_action_lock, flags);

	pp = irq_action + bucket->pil;
	action = *pp;
	if (unlikely(!action))
		return NULL;

	action = *(bucket->pil + irq_action);
	if (!action->handler) {
	if (unlikely(!action->handler)) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		return;
	}
	if (dev_id) {
		for ( ; action; action = action->next) {
			if (action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if (!action) {
			printk("Trying to free free shared IRQ %d\n", bucket->pil);
			spin_unlock_irqrestore(&irq_action_lock, flags);
			return;
		}
	} else if (action->flags & SA_SHIRQ) {
		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;
		return NULL;
	}
	if (action->flags & SA_STATIC_ALLOC) {
		printk("Attempt to free statically allocated IRQ %d (%s)\n",
		       bucket->pil, action->name);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;

	while (action && action->dev_id != dev_id) {
		pp = &action->next;
		action = *pp;
	}

	if (action && tmp)
		tmp->next = action->next;
	else
		*(bucket->pil + irq_action) = action->next;
	if (likely(action))
		*pp = action->next;

	return action;
}

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket;
	unsigned long flags;

	spin_lock_irqsave(&irq_action_lock, flags);

	action = unlink_irq_action(irq, dev_id);

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if (unlikely(!action))
		return;

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	bucket = __bucket(irq);
	if (bucket != &pil0_dummy_bucket) {
		struct irq_desc *desc = bucket->irq_info;
		unsigned long imap = bucket->imap;
		void **vector, *orig;
		int ent;

		orig = bucket->irq_info;
		vector = (void **) orig;

		if ((bucket->flags & IBF_MULTI) != 0) {
			int other = 0;
			void *orphan = NULL;
			for (ent = 0; ent < 4; ent++) {
				if (vector[ent] == action)
					vector[ent] = NULL;
				else if (vector[ent] != NULL) {
					orphan = vector[ent];
					other++;
				}
			}
		int ent, i;

		/* Only free when no other shared irq
		 * uses this bucket.
		 */
			if (other) {
				if (other == 1) {
					/* Convert back to non-shared bucket. */
					bucket->irq_info = orphan;
					bucket->flags &= ~(IBF_MULTI);
					kfree(vector);
		for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
			struct irqaction *p = &desc->action[i];

			if (p == action) {
				desc->action_active_mask &= ~(1 << i);
				break;
			}
				goto out;
			}
		} else {
			bucket->irq_info = NULL;
		}

		if (!desc->action_active_mask) {
			/* This unique interrupt source is now inactive. */
			bucket->flags &= ~IBF_ACTIVE;
...
...
@@ -593,7 +501,7 @@ void free_irq(unsigned int irq, void *dev_id)
			 * and are still active.
			 */
			for (ent = 0; ent < NUM_IVECS; ent++) {
				bp = &ivector_table[ent];
				struct ino_bucket *bp = &ivector_table[ent];

				if (bp != bucket        &&
				    bp->imap == imap    &&
				    (bp->flags & IBF_ACTIVE) != 0)
...
...
@@ -606,9 +514,8 @@ void free_irq(unsigned int irq, void *dev_id)
			if (ent == NUM_IVECS)
				disable_irq(irq);
		}
	}
out:
	kfree(action);

	spin_unlock_irqrestore(&irq_action_lock, flags);
}
...
...
@@ -647,99 +554,55 @@ void synchronize_irq(unsigned int irq)
}
#endif /* CONFIG_SMP */

void catch_disabled_ivec(struct pt_regs *regs)
static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));

	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
	 * to other devices.  Here a single IMAP enabled potentially multiple
	 * unique interrupt sources (which each do have a unique ICLR register.
	 *
	 * So what we do is just register that the IVEC arrived, when registered
	 * for real the request_irq() code will check the bit and signal
	 * a local CPU interrupt for it.
	 */
#if 0
	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
	       bucket - &ivector_table[0], regs->tpc);
#endif
	*irq_work(cpu, 0) = 0;
	bucket->pending = 1;
}

/* Tune this... */
#define FORWARD_VOLUME		12

#ifdef CONFIG_SMP

static inline void redirect_intr(int cpu, struct ino_bucket *bp)
{
	/* Ok, here is what is going on:
	 * 1) Retargeting IRQs on Starfire is very
	 *    expensive so just forget about it on them.
	 * 2) Moving around very high priority interrupts
	 *    is a losing game.
	 * 3) If the current cpu is idle, interrupts are
	 *    useful work, so keep them here.  But do not
	 *    pass to our neighbour if he is not very idle.
	 * 4) If sysadmin explicitly asks for directed intrs,
	 *    Just Do It.
	 */
	struct irqaction *ap = bp->irq_info;
	cpumask_t cpu_mask;
	unsigned int buddy, ticks;

	cpu_mask = get_smpaff_in_irqaction(ap);
	cpus_and(cpu_mask, cpu_mask, cpu_online_map);
	if (cpus_empty(cpu_mask))
		cpu_mask = cpu_online_map;
	struct irq_desc *desc = bp->irq_info;
	unsigned char flags = bp->flags;
	u32 action_mask, i;
	int random;

	if (this_is_starfire != 0 ||
	    bp->pil >= 10 || current->pid == 0)
		goto out;

	bp->flags |= IBF_INPROGRESS;

	/* 'cpu' is the MID (ie. UPAID), calculate the MID
	 * of our buddy.
	 */
	buddy = cpu + 1;
	if (buddy >= NR_CPUS)
		buddy = 0;

	ticks = 0;
	while (!cpu_isset(buddy, cpu_mask)) {
		if (++buddy >= NR_CPUS)
			buddy = 0;
		if (++ticks > NR_CPUS) {
			put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
	if (unlikely(!(flags & IBF_ACTIVE))) {
		bp->pending = 1;
		goto out;
	}
	}

	if (buddy == cpu)
		goto out;

	if (desc->pre_handler)
		desc->pre_handler(bp,
				  desc->pre_handler_arg1,
				  desc->pre_handler_arg2);

	/* Voo-doo programming. */
	if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
		goto out;

	action_mask = desc->action_active_mask;
	random = 0;
	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

	/* This just so happens to be correct on Cheetah
	 * at the moment.
	 */
	buddy <<= 26;
		if (!(action_mask & mask))
			continue;

	/* Push it to our buddy. */
	upa_writel(buddy | IMAP_VALID, bp->imap);
		action_mask &= ~mask;

		if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
			random |= p->flags;

		if (!action_mask)
			break;
	}
	if (bp->pil != 0) {
		upa_writel(ICLR_IDLE, bp->iclr);
		/* Test and add entropy */
		if (random & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
	}
out:
	return;
	bp->flags &= ~IBF_INPROGRESS;
}
#endif

void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp, *nbp;
	struct ino_bucket *bp;
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
...
...
@@ -757,8 +620,6 @@ void handler_irq(int irq, struct pt_regs *regs)
	clear_softint(clr_mask);
}
#else
	int should_forward = 0;

	clear_softint(1 << irq);
#endif
...
...
@@ -773,63 +634,12 @@ void handler_irq(int irq, struct pt_regs *regs)
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	for ( ; bp != NULL; bp = nbp) {
		unsigned char flags = bp->flags;
		unsigned char random = 0;
	while (bp) {
		struct ino_bucket *nbp = __bucket(bp->irq_chain);

		nbp = __bucket(bp->irq_chain);
		bp->irq_chain = 0;
		bp->flags |= IBF_INPROGRESS;

		if ((flags & IBF_ACTIVE) != 0) {
#ifdef CONFIG_PCI
			if ((flags & IBF_DMA_SYNC) != 0) {
				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
				upa_readq(pci_dma_wsync);
			}
#endif
			if ((flags & IBF_MULTI) == 0) {
				struct irqaction *ap = bp->irq_info;
				int ret;

				ret = ap->handler(__irq(bp), ap->dev_id, regs);
				if (ret == IRQ_HANDLED)
					random |= ap->flags;
			} else {
				void **vector = (void **)bp->irq_info;
				int ent;

				for (ent = 0; ent < 4; ent++) {
					struct irqaction *ap = vector[ent];

					if (ap != NULL) {
						int ret;

						ret = ap->handler(__irq(bp), ap->dev_id, regs);
						if (ret == IRQ_HANDLED)
							random |= ap->flags;
					}
				}
			}

			/* Only the dummy bucket lacks IMAP/ICLR. */
			if (bp->pil != 0) {
#ifdef CONFIG_SMP
				if (should_forward) {
					redirect_intr(cpu, bp);
					should_forward = 0;
				}
#endif
				upa_writel(ICLR_IDLE, bp->iclr);

				/* Test and add entropy */
				if (random & SA_SAMPLE_RANDOM)
					add_interrupt_randomness(irq);
			}
		} else
			bp->pending = 1;

		bp->flags &= ~IBF_INPROGRESS;
		process_bucket(irq, bp, regs);
		bp = nbp;
	}
	irq_exit();
}
...
...
@@ -959,7 +769,10 @@ static void distribute_irqs(void)
	 */
	for (level = 1; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];
		if (level == 12) continue;

		if (level == 12)
			continue;

		while(p) {
			cpu = retarget_one_irq(p, cpu);
			p = p->next;
...
...
arch/sparc64/kernel/pci_psycho.c
...
...
@@ -1303,8 +1303,7 @@ static void psycho_controller_hwinit(struct pci_controller_info *p)
{
	u64 tmp;

	/* PROM sets the IRQ retry value too low, increase it. */
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 0xff);
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 5);

	/* Enable arbiter for all PCI slots. */
	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL);
...
...
arch/sparc64/kernel/pci_sabre.c
...
...
@@ -595,6 +595,23 @@ static int __init sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
	return ret;
}

/* When a device lives behind a bridge deeper in the PCI bus topology
 * than APB, a special sequence must run to make sure all pending DMA
 * transfers at the time of IRQ delivery are visible in the coherency
 * domain by the cpu.  This sequence is to perform a read on the far
 * side of the non-APB bridge, then perform a read of Sabre's DMA
 * write-sync register.
 */
static void sabre_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
{
	struct pci_dev *pdev = _arg1;
	unsigned long sync_reg = (unsigned long) _arg2;
	u16 _unused;

	pci_read_config_word(pdev, PCI_VENDOR_ID, &_unused);
	sabre_read(sync_reg);
}

static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
					   struct pci_dev *pdev,
					   unsigned int ino)
...
...
@@ -639,24 +656,14 @@ static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
	if (pdev) {
		struct pcidev_cookie *pcp = pdev->sysdata;

		/* When a device lives behind a bridge deeper in the
		 * PCI bus topology than APB, a special sequence must
		 * run to make sure all pending DMA transfers at the
		 * time of IRQ delivery are visible in the coherency
		 * domain by the cpu.  This sequence is to perform
		 * a read on the far side of the non-APB bridge, then
		 * perform a read of Sabre's DMA write-sync register.
		 *
		 * Currently, the PCI_CONFIG register for the device
		 * is used for this read from the far side of the bridge.
		 */
		if (pdev->bus->number != pcp->pbm->pci_first_busno) {
			bucket->flags |= IBF_DMA_SYNC;
			bucket->synctab_ent = dma_sync_reg_table_entry++;
			dma_sync_reg_table[bucket->synctab_ent] =
				(unsigned long) sabre_pci_config_mkaddr(
					pcp->pbm,
					pdev->bus->number,
					pdev->devfn,
					PCI_COMMAND);
			struct pci_controller_info *p = pcp->pbm->parent;
			struct irq_desc *d = bucket->irq_info;

			d->pre_handler = sabre_wsync_handler;
			d->pre_handler_arg1 = pdev;
			d->pre_handler_arg2 = (void *)
				p->pbm_A.controller_regs + SABRE_WRSYNC;
		}
	}
	return __irq(bucket);
...
...
@@ -1626,10 +1633,9 @@ void __init sabre_init(int pnode, char *model_name)
	 */
	p->pbm_A.controller_regs = pr_regs[0].phys_addr;
	p->pbm_B.controller_regs = pr_regs[0].phys_addr;
	pci_dma_wsync = p->pbm_A.controller_regs + SABRE_WRSYNC;

	printk("PCI: Found SABRE, main regs at %016lx, wsync at %016lx\n",
	       p->pbm_A.controller_regs, pci_dma_wsync);
	printk("PCI: Found SABRE, main regs at %016lx\n",
	       p->pbm_A.controller_regs);

	/* Clear interrupts */
...
...
arch/sparc64/kernel/pci_schizo.c
...
...
@@ -15,6 +15,7 @@
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include "pci_impl.h"
#include "iommu_common.h"
...
...
@@ -326,6 +327,44 @@ static int __init schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
	return ret;
}

static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
{
	unsigned long sync_reg = (unsigned long) _arg2;
	u64 mask = 1 << (__irq_ino(__irq(bucket)) & IMAP_INO);
	u64 val;
	int limit;

	schizo_write(sync_reg, mask);

	limit = 100000;
	val = 0;
	while (--limit) {
		val = schizo_read(sync_reg);
		if (!(val & mask))
			break;
	}
	if (limit <= 0) {
		printk("tomatillo_wsync_handler: DMA won't sync [%lx:%lx]\n",
		       val, mask);
	}

	if (_arg1) {
		static unsigned char cacheline[64]
			__attribute__ ((aligned (64)));

		__asm__ __volatile__("rd %%fprs, %0\n\t"
				     "or %0, %4, %1\n\t"
				     "wr %1, 0x0, %%fprs\n\t"
				     "stda %%f0, [%5] %6\n\t"
				     "wr %0, 0x0, %%fprs\n\t"
				     "membar #Sync"
				     : "=&r" (mask), "=&r" (val)
				     : "0" (mask), "1" (val),
				       "i" (FPRS_FEF), "r" (&cacheline[0]),
				       "i" (ASI_BLK_COMMIT_P));
	}
}

static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
				     struct pci_dev *pdev,
				     unsigned int ino)
...
...
@@ -369,6 +408,15 @@ static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
	bucket = __bucket(build_irq(pil, ign_fixup, iclr, imap));
	bucket->flags |= IBF_PCI;

	if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
		struct irq_desc *p = bucket->irq_info;

		p->pre_handler = tomatillo_wsync_handler;
		p->pre_handler_arg1 = ((pbm->chip_version <= 4) ?
				       (void *) 1 : (void *) 0);
		p->pre_handler_arg2 = (void *) pbm->sync_reg;
	}

	return __irq(bucket);
}
...
...
@@ -885,6 +933,7 @@ static irqreturn_t schizo_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
#define SCHIZO_PCI_CTRL		(0x2000UL)
#define SCHIZO_PCICTRL_BUS_UNUS	(1UL << 63UL)	/* Safari */
#define SCHIZO_PCICTRL_DTO_INT	(1UL << 61UL)	/* Tomatillo */
#define SCHIZO_PCICTRL_ARB_PRIO	(0x1ff << 52UL)	/* Tomatillo */
#define SCHIZO_PCICTRL_ESLCK	(1UL << 51UL)	/* Safari */
#define SCHIZO_PCICTRL_ERRSLOT	(7UL << 48UL)	/* Safari */
...
...
@@ -1887,37 +1936,27 @@ static void __init schizo_pbm_hw_init(struct pci_pbm_info *pbm)
{
	u64 tmp;

	/* Set IRQ retry to infinity. */
	schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY,
		     SCHIZO_IRQ_RETRY_INF);
	schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY, 5);

	/* Enable arbiter for all PCI slots.  Also, disable PCI interval
	 * timer so that DTO (Discard TimeOuts) are not reported because
	 * some Schizo revisions report them erroneously.
	 */
	tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_CTRL);
	if (pbm->chip_type == PBM_CHIP_TYPE_SCHIZO_PLUS &&
	    pbm->chip_version == 0x5 &&
	    pbm->chip_revision == 0x1)
		tmp |= 0x0f;
	else
		/* Enable arbiter for all PCI slots. */
		tmp |= 0xff;

	tmp &= ~SCHIZO_PCICTRL_PTO;
	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
	    pbm->chip_version >= 0x2)
		tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT;
	else
		tmp |= 0x1UL << SCHIZO_PCICTRL_PTO_SHIFT;

	if (!prom_getbool(pbm->prom_node, "no-bus-parking"))
		tmp |= SCHIZO_PCICTRL_PARK;
	else
		tmp &= ~SCHIZO_PCICTRL_PARK;

	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
	    pbm->chip_version <= 0x1)
		tmp |= (1UL << 61);
		tmp |= SCHIZO_PCICTRL_DTO_INT;
	else
		tmp &= ~(1UL << 61);
		tmp &= ~SCHIZO_PCICTRL_DTO_INT;

	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO)
		tmp |= (SCHIZO_PCICTRL_MRM_PREF |
...
...
@@ -2015,6 +2054,9 @@ static void __init schizo_pbm_init(struct pci_controller_info *p,
	pbm->pbm_regs = pr_regs[0].phys_addr;
	pbm->controller_regs = pr_regs[1].phys_addr - 0x10000UL;

	if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
		pbm->sync_reg = pr_regs[3].phys_addr + 0x1a18UL;

	sprintf(pbm->name,
		(chip_type == PBM_CHIP_TYPE_TOMATILLO ?
		 "TOMATILLO%d PBM%c" :
...
...
arch/sparc64/kernel/time.c
...
...
@@ -973,7 +973,7 @@ static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_reg
	int err;

	/* Register IRQ handler. */
	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC,
	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, 0,
			  "timer", NULL);

	if (err) {
...
...
drivers/sbus/char/bpp.c
...
...
@@ -79,10 +79,6 @@ struct inst {
	unsigned char run_length;
	unsigned char repeat_byte;

	/* These members manage timeouts for programmed delays */
	wait_queue_head_t wait_queue;
	struct timer_list timer_list;
};

static struct inst instances[BPP_NO];
...
...
@@ -297,16 +293,10 @@ static unsigned short get_pins(unsigned minor)
#endif /* __sparc__ */

static void bpp_wake_up(unsigned long val)
{ wake_up(&instances[val].wait_queue); }

static void snooze(unsigned long snooze_time, unsigned minor)
{
	init_timer(&instances[minor].timer_list);
	instances[minor].timer_list.expires = jiffies + snooze_time + 1;
	instances[minor].timer_list.data    = minor;
	add_timer(&instances[minor].timer_list);
	sleep_on(&instances[minor].wait_queue);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(snooze_time + 1);
}

static int wait_for(unsigned short set, unsigned short clr,
...
...
@@ -880,11 +870,8 @@ static void probeLptPort(unsigned idx)
	instances[idx].enhanced = 0;
	instances[idx].direction = 0;
	instances[idx].mode = COMPATIBILITY;
	instances[idx].wait_queue = 0;
	instances[idx].run_length = 0;
	instances[idx].run_flag = 0;
	init_timer(&instances[idx].timer_list);
	instances[idx].timer_list.function = bpp_wake_up;

	if (!request_region(lpAddr, 3, dev_name)) return;
/*
...
...
@@ -977,11 +964,8 @@ static void probeLptPort(unsigned idx)
	instances[idx].enhanced = 0;
	instances[idx].direction = 0;
	instances[idx].mode = COMPATIBILITY;
	init_waitqueue_head(&instances[idx].wait_queue);
	instances[idx].run_length = 0;
	instances[idx].run_flag = 0;
	init_timer(&instances[idx].timer_list);
	instances[idx].timer_list.function = bpp_wake_up;

	if (!rp) return;
...
...
include/asm-sparc64/irq.h
...
...
@@ -16,6 +16,18 @@
#include <asm/pil.h>
#include <asm/ptrace.h>

struct ino_bucket;

#define MAX_IRQ_DESC_ACTION	4

struct irq_desc {
	void			(*pre_handler)(struct ino_bucket *, void *, void *);
	void			*pre_handler_arg1;
	void			*pre_handler_arg2;
	u32			action_active_mask;
	struct irqaction	action[MAX_IRQ_DESC_ACTION];
};

/* You should not mess with this directly. That's the job of irq.c.
 *
 * If you make changes here, please update hand coded assembler of
...
...
@@ -42,24 +54,11 @@ struct ino_bucket {
	/* Miscellaneous flags. */
/*0x06*/	unsigned char flags;

	/* This is used to deal with IBF_DMA_SYNC on
	 * Sabre systems.
	 */
/*0x07*/	unsigned char synctab_ent;
	/* Currently unused. */
/*0x07*/	unsigned char __pad;

	/* Reference to handler for this IRQ.  If this is
	 * non-NULL this means it is active and should be
	 * serviced.  Else the pending member is set to one
	 * and later registry of the interrupt checks for
	 * this condition.
	 *
	 * Normally this is just an irq_action structure.
	 * But, on PCI, if multiple interrupt sources behind
	 * a bridge have multiple interrupt sources that share
	 * the same INO bucket, this points to an array of
	 * pointers to four IRQ action structures.
	 */
/*0x08*/	void *irq_info;
	/* Reference to IRQ descriptor for this bucket. */
/*0x08*/	struct irq_desc *irq_info;

	/* Sun5 Interrupt Clear Register. */
/*0x10*/	unsigned long iclr;
...
...
@@ -69,12 +68,6 @@ struct ino_bucket {
};

#ifdef CONFIG_PCI
extern unsigned long pci_dma_wsync;
extern unsigned long dma_sync_reg_table[256];
extern unsigned char dma_sync_reg_table_entry;
#endif

/* IMAP/ICLR register defines */
#define IMAP_VALID		0x80000000	/* IRQ Enabled		*/
#define IMAP_TID_UPA		0x7c000000	/* UPA TargetID		*/
...
...
@@ -90,10 +83,8 @@ extern unsigned char dma_sync_reg_table_entry;
#define ICLR_PENDING		0x00000003	/* Pending state	*/

/* Only 8-bits are available, be careful. -DaveM */
#define IBF_DMA_SYNC	0x01	/* DMA synchronization behind PCI bridge needed. */
#define IBF_PCI		0x02	/* Indicates PSYCHO/SABRE/SCHIZO PCI interrupt.	 */
#define IBF_ACTIVE	0x04	/* This interrupt is active and has a handler.	 */
#define IBF_MULTI	0x08	/* On PCI, indicates shared bucket.		 */
#define IBF_PCI		0x02	/* PSYCHO/SABRE/SCHIZO PCI interrupt.	 */
#define IBF_ACTIVE	0x04	/* Interrupt is active and has a handler.*/
#define IBF_INPROGRESS	0x10	/* IRQ is being serviced.		 */

#define NUM_IVECS	(IMAP_INR + 1)
...
...
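Taken together with the irq.c and pci_schizo.c hunks above, the new per-bucket irq_desc works roughly as follows: build_irq() kmalloc()s one irq_desc per ino_bucket, and a PCI controller driver may install a pre_handler that runs before the bucket's irqaction handlers are invoked. A minimal sketch of that usage (illustrative names only, not code from this commit):

/* Sketch only: how a bus driver is expected to hook the new per-bucket
 * irq_desc, based on the declarations in include/asm-sparc64/irq.h above.
 * example_wsync_handler and example_wire_up are hypothetical names.
 */
static void example_wsync_handler(struct ino_bucket *bp, void *arg1, void *arg2)
{
	/* e.g. flush posted DMA writes before the device handlers run */
}

static void example_wire_up(struct ino_bucket *bucket, void *cookie)
{
	struct irq_desc *d = bucket->irq_info;	/* allocated by build_irq() */

	d->pre_handler      = example_wsync_handler;
	d->pre_handler_arg1 = cookie;
	d->pre_handler_arg2 = NULL;
}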
include/asm-sparc64/pbm.h
...
...
@@ -145,6 +145,9 @@ struct pci_pbm_info {
	/* Physical address base of PBM registers. */
	unsigned long			pbm_regs;

	/* Physical address of DMA sync register, if any.  */
	unsigned long			sync_reg;

	/* Opaque 32-bit system bus Port ID. */
	u32				portid;
...
...
include/asm-sparc64/signal.h
...
...
@@ -162,21 +162,6 @@ struct sigstack {
#define MINSIGSTKSZ 4096
#define SIGSTKSZ 16384
#ifdef __KERNEL__
/*
* DJHR
* SA_STATIC_ALLOC is used for the SPARC system to indicate that this
* interrupt handler's irq structure should be statically allocated
* by the request_irq routine.
* The alternative is that arch/sparc/kernel/irq.c has carnal knowledge
* of interrupt usage and that sucks. Also without a flag like this
* it may be possible for the free_irq routine to attempt to free
* statically allocated data.. which is NOT GOOD.
*
*/
#define SA_STATIC_ALLOC 0x80
#endif
#include <asm-generic/signal.h>
struct __new_sigaction {
...
...
include/linux/compat_ioctl.h
...
...
@@ -346,10 +346,27 @@ COMPATIBLE_IOCTL(PPPOEIOCDFWD)
/* LP */
COMPATIBLE_IOCTL(LPGETSTATUS)
/* ppdev */
COMPATIBLE_IOCTL(PPSETMODE)
COMPATIBLE_IOCTL(PPRSTATUS)
COMPATIBLE_IOCTL(PPRCONTROL)
COMPATIBLE_IOCTL(PPWCONTROL)
COMPATIBLE_IOCTL(PPFCONTROL)
COMPATIBLE_IOCTL(PPRDATA)
COMPATIBLE_IOCTL(PPWDATA)
COMPATIBLE_IOCTL(PPCLAIM)
COMPATIBLE_IOCTL(PPRELEASE)
COMPATIBLE_IOCTL(PPEXCL)
COMPATIBLE_IOCTL(PPYIELD)
COMPATIBLE_IOCTL(PPEXCL)
COMPATIBLE_IOCTL(PPDATADIR)
COMPATIBLE_IOCTL(PPNEGOT)
COMPATIBLE_IOCTL(PPWCTLONIRQ)
COMPATIBLE_IOCTL(PPCLRIRQ)
COMPATIBLE_IOCTL(PPSETPHASE)
COMPATIBLE_IOCTL(PPGETMODES)
COMPATIBLE_IOCTL(PPGETMODE)
COMPATIBLE_IOCTL(PPGETPHASE)
COMPATIBLE_IOCTL(PPGETFLAGS)
COMPATIBLE_IOCTL(PPSETFLAGS)
/* CDROM stuff */
COMPATIBLE_IOCTL(CDROMPAUSE)
COMPATIBLE_IOCTL(CDROMRESUME)
...
...