Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
L
linux-davinci
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Redmine
Redmine
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Operations
Operations
Metrics
Environments
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
linux
linux-davinci
Commits
e74b3f7d
Commit
e74b3f7d
authored
Dec 25, 2008
by
David S. Miller
Browse files
Options
Browse Files
Download
Plain Diff
Merge branch 'for-david' of
git://git.kernel.org/pub/scm/linux/kernel/git/chris/linux-2.6
parents
13e620e0
59f8500e
Changes
9
Hide whitespace changes
Inline
Side-by-side
Showing
9 changed files
with
1585 additions
and
184 deletions
+1585
-184
MAINTAINERS
MAINTAINERS
+6
-1
arch/arm/mach-ixp4xx/include/mach/qmgr.h
arch/arm/mach-ixp4xx/include/mach/qmgr.h
+33
-2
arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
+33
-11
drivers/net/arm/Kconfig
drivers/net/arm/Kconfig
+1
-1
drivers/net/arm/ixp4xx_eth.c
drivers/net/arm/ixp4xx_eth.c
+173
-164
drivers/net/wan/Kconfig
drivers/net/wan/Kconfig
+7
-0
drivers/net/wan/Makefile
drivers/net/wan/Makefile
+1
-0
drivers/net/wan/hdlc_ppp.c
drivers/net/wan/hdlc_ppp.c
+6
-5
drivers/net/wan/ixp4xx_hss.c
drivers/net/wan/ixp4xx_hss.c
+1325
-0
No files found.
MAINTAINERS
View file @
e74b3f7d
...
...
@@ -1844,7 +1844,7 @@ P: Haavard Skinnemoen
M: hskinnemoen@atmel.com
S: Supported
GENERIC HDLC
DRIVER, N2, C101, PCI200SYN and WANXL
DRIVERS
GENERIC HDLC
(WAN)
DRIVERS
P: Krzysztof Halasa
M: khc@pm.waw.pl
W: http://www.kernel.org/pub/linux/utils/net/hdlc/
...
...
@@ -2243,6 +2243,11 @@ M: dan.j.williams@intel.com
L: linux-kernel@vger.kernel.org
S: Supported
INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
P: Krzysztof Halasa
M: khc@pm.waw.pl
S: Maintained
INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT
P: Deepak Saxena
M: dsaxena@plexity.net
...
...
arch/arm/mach-ixp4xx/include/mach/qmgr.h
View file @
e74b3f7d
...
...
@@ -12,6 +12,8 @@
#include <linux/io.h>
#include <linux/kernel.h>
#define DEBUG_QMGR 0
#define HALF_QUEUES 32
#define QUEUES 64
/* only 32 lower queues currently supported */
#define MAX_QUEUE_LENGTH 4
/* in dwords */
...
...
@@ -61,22 +63,51 @@ void qmgr_enable_irq(unsigned int queue);
void
qmgr_disable_irq
(
unsigned
int
queue
);
/* request_ and release_queue() must be called from non-IRQ context */
#if DEBUG_QMGR
extern
char
qmgr_queue_descs
[
QUEUES
][
32
];
int
qmgr_request_queue
(
unsigned
int
queue
,
unsigned
int
len
/* dwords */
,
unsigned
int
nearly_empty_watermark
,
unsigned
int
nearly_full_watermark
);
unsigned
int
nearly_full_watermark
,
const
char
*
desc_format
,
const
char
*
name
);
#else
int
__qmgr_request_queue
(
unsigned
int
queue
,
unsigned
int
len
/* dwords */
,
unsigned
int
nearly_empty_watermark
,
unsigned
int
nearly_full_watermark
);
#define qmgr_request_queue(queue, len, nearly_empty_watermark, \
nearly_full_watermark, desc_format, name) \
__qmgr_request_queue(queue, len, nearly_empty_watermark, \
nearly_full_watermark)
#endif
void
qmgr_release_queue
(
unsigned
int
queue
);
static
inline
void
qmgr_put_entry
(
unsigned
int
queue
,
u32
val
)
{
extern
struct
qmgr_regs
__iomem
*
qmgr_regs
;
#if DEBUG_QMGR
BUG_ON
(
!
qmgr_queue_descs
[
queue
]);
/* not yet requested */
printk
(
KERN_DEBUG
"Queue %s(%i) put %X
\n
"
,
qmgr_queue_descs
[
queue
],
queue
,
val
);
#endif
__raw_writel
(
val
,
&
qmgr_regs
->
acc
[
queue
][
0
]);
}
static
inline
u32
qmgr_get_entry
(
unsigned
int
queue
)
{
u32
val
;
extern
struct
qmgr_regs
__iomem
*
qmgr_regs
;
return
__raw_readl
(
&
qmgr_regs
->
acc
[
queue
][
0
]);
val
=
__raw_readl
(
&
qmgr_regs
->
acc
[
queue
][
0
]);
#if DEBUG_QMGR
BUG_ON
(
!
qmgr_queue_descs
[
queue
]);
/* not yet requested */
printk
(
KERN_DEBUG
"Queue %s(%i) get %X
\n
"
,
qmgr_queue_descs
[
queue
],
queue
,
val
);
#endif
return
val
;
}
static
inline
int
qmgr_get_stat1
(
unsigned
int
queue
)
...
...
arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
View file @
e74b3f7d
...
...
@@ -14,8 +14,6 @@
#include <linux/module.h>
#include <mach/qmgr.h>
#define DEBUG 0
struct
qmgr_regs
__iomem
*
qmgr_regs
;
static
struct
resource
*
mem_res
;
static
spinlock_t
qmgr_lock
;
...
...
@@ -23,6 +21,10 @@ static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static
void
(
*
irq_handlers
[
HALF_QUEUES
])(
void
*
pdev
);
static
void
*
irq_pdevs
[
HALF_QUEUES
];
#if DEBUG_QMGR
char
qmgr_queue_descs
[
QUEUES
][
32
];
#endif
void
qmgr_set_irq
(
unsigned
int
queue
,
int
src
,
void
(
*
handler
)(
void
*
pdev
),
void
*
pdev
)
{
...
...
@@ -70,6 +72,7 @@ void qmgr_disable_irq(unsigned int queue)
spin_lock_irqsave
(
&
qmgr_lock
,
flags
);
__raw_writel
(
__raw_readl
(
&
qmgr_regs
->
irqen
[
0
])
&
~
(
1
<<
queue
),
&
qmgr_regs
->
irqen
[
0
]);
__raw_writel
(
1
<<
queue
,
&
qmgr_regs
->
irqstat
[
0
]);
/* clear */
spin_unlock_irqrestore
(
&
qmgr_lock
,
flags
);
}
...
...
@@ -81,9 +84,16 @@ static inline void shift_mask(u32 *mask)
mask
[
0
]
<<=
1
;
}
#if DEBUG_QMGR
int
qmgr_request_queue
(
unsigned
int
queue
,
unsigned
int
len
/* dwords */
,
unsigned
int
nearly_empty_watermark
,
unsigned
int
nearly_full_watermark
)
unsigned
int
nearly_full_watermark
,
const
char
*
desc_format
,
const
char
*
name
)
#else
int
__qmgr_request_queue
(
unsigned
int
queue
,
unsigned
int
len
/* dwords */
,
unsigned
int
nearly_empty_watermark
,
unsigned
int
nearly_full_watermark
)
#endif
{
u32
cfg
,
addr
=
0
,
mask
[
4
];
/* in 16-dwords */
int
err
;
...
...
@@ -151,12 +161,13 @@ int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
used_sram_bitmap
[
2
]
|=
mask
[
2
];
used_sram_bitmap
[
3
]
|=
mask
[
3
];
__raw_writel
(
cfg
|
(
addr
<<
14
),
&
qmgr_regs
->
sram
[
queue
]);
spin_unlock_irq
(
&
qmgr_lock
);
#if DEBUG
printk
(
KERN_DEBUG
"qmgr: requested queue %
i,
addr = 0x%02X
\n
"
,
queue
,
addr
);
#if DEBUG_QMGR
snprintf
(
qmgr_queue_descs
[
queue
],
sizeof
(
qmgr_queue_descs
[
0
]),
desc_format
,
name
);
printk
(
KERN_DEBUG
"qmgr: requested queue %
s(%i)
addr = 0x%02X
\n
"
,
q
mgr_queue_descs
[
queue
],
q
ueue
,
addr
);
#endif
spin_unlock_irq
(
&
qmgr_lock
);
return
0
;
err:
...
...
@@ -189,6 +200,11 @@ void qmgr_release_queue(unsigned int queue)
while
(
addr
--
)
shift_mask
(
mask
);
#if DEBUG_QMGR
printk
(
KERN_DEBUG
"qmgr: releasing queue %s(%i)
\n
"
,
qmgr_queue_descs
[
queue
],
queue
);
qmgr_queue_descs
[
queue
][
0
]
=
'\x0'
;
#endif
__raw_writel
(
0
,
&
qmgr_regs
->
sram
[
queue
]);
used_sram_bitmap
[
0
]
&=
~
mask
[
0
];
...
...
@@ -199,9 +215,10 @@ void qmgr_release_queue(unsigned int queue)
spin_unlock_irq
(
&
qmgr_lock
);
module_put
(
THIS_MODULE
);
#if DEBUG
printk
(
KERN_DEBUG
"qmgr: released queue %i
\n
"
,
queue
);
#endif
while
((
addr
=
qmgr_get_entry
(
queue
)))
printk
(
KERN_ERR
"qmgr: released queue %i not empty: 0x%08X
\n
"
,
queue
,
addr
);
}
static
int
qmgr_init
(
void
)
...
...
@@ -272,5 +289,10 @@ EXPORT_SYMBOL(qmgr_regs);
EXPORT_SYMBOL
(
qmgr_set_irq
);
EXPORT_SYMBOL
(
qmgr_enable_irq
);
EXPORT_SYMBOL
(
qmgr_disable_irq
);
#if DEBUG_QMGR
EXPORT_SYMBOL
(
qmgr_queue_descs
);
EXPORT_SYMBOL
(
qmgr_request_queue
);
#else
EXPORT_SYMBOL
(
__qmgr_request_queue
);
#endif
EXPORT_SYMBOL
(
qmgr_release_queue
);
drivers/net/arm/Kconfig
View file @
e74b3f7d
...
...
@@ -59,7 +59,7 @@ config EP93XX_ETH
config IXP4XX_ETH
tristate "Intel IXP4xx Ethernet support"
depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
select
MII
select
PHYLIB
help
Say Y here if you want to use built-in Ethernet ports
on IXP4xx processor.
drivers/net/arm/ixp4xx_eth.c
View file @
e74b3f7d
...
...
@@ -30,12 +30,11 @@
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/
mii
.h>
#include <linux/
phy
.h>
#include <linux/platform_device.h>
#include <mach/npe.h>
#include <mach/qmgr.h>
#define DEBUG_QUEUES 0
#define DEBUG_DESC 0
#define DEBUG_RX 0
#define DEBUG_TX 0
...
...
@@ -59,7 +58,6 @@
#define NAPI_WEIGHT 16
#define MDIO_INTERVAL (3 * HZ)
#define MAX_MDIO_RETRIES 100
/* microseconds, typically 30 cycles */
#define MAX_MII_RESET_RETRIES 100
/* mdio_read() cycles, typically 4 */
#define MAX_CLOSE_WAIT 1000
/* microseconds, typically 2-3 cycles */
#define NPE_ID(port_id) ((port_id) >> 4)
...
...
@@ -164,15 +162,14 @@ struct port {
struct
npe
*
npe
;
struct
net_device
*
netdev
;
struct
napi_struct
napi
;
struct
net_device_stats
stat
;
struct
mii_if_info
mii
;
struct
delayed_work
mdio_thread
;
struct
phy_device
*
phydev
;
struct
eth_plat_info
*
plat
;
buffer_t
*
rx_buff_tab
[
RX_DESCS
],
*
tx_buff_tab
[
TX_DESCS
];
struct
desc
*
desc_tab
;
/* coherent */
u32
desc_tab_phys
;
int
id
;
/* logical port ID */
u16
mii_bmcr
;
int
speed
,
duplex
;
u8
firmware
[
4
];
};
/* NPE message structure */
...
...
@@ -243,19 +240,20 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
static
spinlock_t
mdio_lock
;
static
struct
eth_regs
__iomem
*
mdio_regs
;
/* mdio command and status only */
struct
mii_bus
*
mdio_bus
;
static
int
ports_open
;
static
struct
port
*
npe_port_tab
[
MAX_NPES
];
static
struct
dma_pool
*
dma_pool
;
static
u16
mdio_cmd
(
struct
net_device
*
dev
,
int
phy_id
,
int
location
,
int
write
,
u16
cmd
)
static
int
ixp4xx_mdio_cmd
(
struct
mii_bus
*
bus
,
int
phy_id
,
int
location
,
int
write
,
u16
cmd
)
{
int
cycles
=
0
;
if
(
__raw_readl
(
&
mdio_regs
->
mdio_command
[
3
])
&
0x80
)
{
printk
(
KERN_ERR
"%s: MII not ready to transmit
\n
"
,
dev
->
name
);
return
0
;
printk
(
KERN_ERR
"%s: MII not ready to transmit
\n
"
,
bus
->
name
);
return
-
1
;
}
if
(
write
)
{
...
...
@@ -274,107 +272,119 @@ static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
}
if
(
cycles
==
MAX_MDIO_RETRIES
)
{
printk
(
KERN_ERR
"%s: MII write failed
\n
"
,
dev
->
name
);
return
0
;
printk
(
KERN_ERR
"%s #%i: MII write failed
\n
"
,
bus
->
name
,
phy_id
);
return
-
1
;
}
#if DEBUG_MDIO
printk
(
KERN_DEBUG
"%s
: mdio_cmd() took %i cycles
\n
"
,
dev
->
name
,
cycles
);
printk
(
KERN_DEBUG
"%s
#%i: mdio_%s() took %i cycles
\n
"
,
bus
->
name
,
phy_id
,
write
?
"write"
:
"read"
,
cycles
);
#endif
if
(
write
)
return
0
;
if
(
__raw_readl
(
&
mdio_regs
->
mdio_status
[
3
])
&
0x80
)
{
printk
(
KERN_ERR
"%s: MII read failed
\n
"
,
dev
->
name
);
return
0
;
#if DEBUG_MDIO
printk
(
KERN_DEBUG
"%s #%i: MII read failed
\n
"
,
bus
->
name
,
phy_id
);
#endif
return
0xFFFF
;
/* don't return error */
}
return
(
__raw_readl
(
&
mdio_regs
->
mdio_status
[
0
])
&
0xFF
)
|
(
__raw_readl
(
&
mdio_regs
->
mdio_status
[
1
]
)
<<
8
);
(
(
__raw_readl
(
&
mdio_regs
->
mdio_status
[
1
])
&
0xFF
)
<<
8
);
}
static
int
mdio_read
(
struct
net_device
*
dev
,
int
phy_id
,
int
location
)
static
int
ixp4xx_mdio_read
(
struct
mii_bus
*
bus
,
int
phy_id
,
int
location
)
{
unsigned
long
flags
;
u16
val
;
int
ret
;
spin_lock_irqsave
(
&
mdio_lock
,
flags
);
val
=
mdio_cmd
(
dev
,
phy_id
,
location
,
0
,
0
);
ret
=
ixp4xx_mdio_cmd
(
bus
,
phy_id
,
location
,
0
,
0
);
spin_unlock_irqrestore
(
&
mdio_lock
,
flags
);
return
val
;
#if DEBUG_MDIO
printk
(
KERN_DEBUG
"%s #%i: MII read [%i] -> 0x%X
\n
"
,
bus
->
name
,
phy_id
,
location
,
ret
);
#endif
return
ret
;
}
static
void
mdio_write
(
struct
net_device
*
dev
,
int
phy_id
,
int
location
,
int
val
)
static
int
ixp4xx_mdio_write
(
struct
mii_bus
*
bus
,
int
phy_id
,
int
location
,
u16
val
)
{
unsigned
long
flags
;
int
ret
;
spin_lock_irqsave
(
&
mdio_lock
,
flags
);
mdio_cmd
(
dev
,
phy_id
,
location
,
1
,
val
);
ret
=
ixp4xx_mdio_cmd
(
bus
,
phy_id
,
location
,
1
,
val
);
spin_unlock_irqrestore
(
&
mdio_lock
,
flags
);
#if DEBUG_MDIO
printk
(
KERN_DEBUG
"%s #%i: MII read [%i] <- 0x%X, err = %i
\n
"
,
bus
->
name
,
phy_id
,
location
,
val
,
ret
);
#endif
return
ret
;
}
static
void
phy_reset
(
struct
net_device
*
dev
,
int
phy_
id
)
static
int
ixp4xx_mdio_register
(
vo
id
)
{
struct
port
*
port
=
netdev_priv
(
dev
);
int
cycles
=
0
;
int
err
;
mdio_write
(
dev
,
phy_id
,
MII_BMCR
,
port
->
mii_bmcr
|
BMCR_RESET
);
if
(
!
(
mdio_bus
=
mdiobus_alloc
()))
return
-
ENOMEM
;
while
(
cycles
<
MAX_MII_RESET_RETRIES
)
{
if
(
!
(
mdio_read
(
dev
,
phy_id
,
MII_BMCR
)
&
BMCR_RESET
))
{
#if DEBUG_MDIO
printk
(
KERN_DEBUG
"%s: phy_reset() took %i cycles
\n
"
,
dev
->
name
,
cycles
);
#endif
return
;
}
udelay
(
1
);
cycles
++
;
}
/* All MII PHY accesses use NPE-B Ethernet registers */
spin_lock_init
(
&
mdio_lock
);
mdio_regs
=
(
struct
eth_regs
__iomem
*
)
IXP4XX_EthB_BASE_VIRT
;
__raw_writel
(
DEFAULT_CORE_CNTRL
,
&
mdio_regs
->
core_control
);
mdio_bus
->
name
=
"IXP4xx MII Bus"
;
mdio_bus
->
read
=
&
ixp4xx_mdio_read
;
mdio_bus
->
write
=
&
ixp4xx_mdio_write
;
strcpy
(
mdio_bus
->
id
,
"0"
);
printk
(
KERN_ERR
"%s: MII reset failed
\n
"
,
dev
->
name
);
if
((
err
=
mdiobus_register
(
mdio_bus
)))
mdiobus_free
(
mdio_bus
);
return
err
;
}
static
void
eth_set_duplex
(
struct
port
*
port
)
static
void
ixp4xx_mdio_remove
(
void
)
{
if
(
port
->
mii
.
full_duplex
)
__raw_writel
(
DEFAULT_TX_CNTRL0
&
~
TX_CNTRL0_HALFDUPLEX
,
&
port
->
regs
->
tx_control
[
0
]);
else
__raw_writel
(
DEFAULT_TX_CNTRL0
|
TX_CNTRL0_HALFDUPLEX
,
&
port
->
regs
->
tx_control
[
0
]);
mdiobus_unregister
(
mdio_bus
);
mdiobus_free
(
mdio_bus
);
}
static
void
phy_check_media
(
struct
port
*
port
,
int
init
)
static
void
ixp4xx_adjust_link
(
struct
net_device
*
dev
)
{
if
(
mii_check_media
(
&
port
->
mii
,
1
,
init
))
eth_set_duplex
(
port
);
if
(
port
->
mii
.
force_media
)
{
/* mii_check_media() doesn't work */
struct
net_device
*
dev
=
port
->
netdev
;
int
cur_link
=
mii_link_ok
(
&
port
->
mii
);
int
prev_link
=
netif_carrier_ok
(
dev
);
if
(
!
prev_link
&&
cur_link
)
{
printk
(
KERN_INFO
"%s: link up
\n
"
,
dev
->
name
);
netif_carrier_on
(
dev
);
}
else
if
(
prev_link
&&
!
cur_link
)
{
struct
port
*
port
=
netdev_priv
(
dev
);
struct
phy_device
*
phydev
=
port
->
phydev
;
if
(
!
phydev
->
link
)
{
if
(
port
->
speed
)
{
port
->
speed
=
0
;
printk
(
KERN_INFO
"%s: link down
\n
"
,
dev
->
name
);
netif_carrier_off
(
dev
);
}
return
;
}
}
if
(
port
->
speed
==
phydev
->
speed
&&
port
->
duplex
==
phydev
->
duplex
)
return
;
static
void
mdio_thread
(
struct
work_struct
*
work
)
{
struct
port
*
port
=
container_of
(
work
,
struct
port
,
mdio_thread
.
work
);
port
->
speed
=
phydev
->
speed
;
port
->
duplex
=
phydev
->
duplex
;
if
(
port
->
duplex
)
__raw_writel
(
DEFAULT_TX_CNTRL0
&
~
TX_CNTRL0_HALFDUPLEX
,
&
port
->
regs
->
tx_control
[
0
]);
else
__raw_writel
(
DEFAULT_TX_CNTRL0
|
TX_CNTRL0_HALFDUPLEX
,
&
port
->
regs
->
tx_control
[
0
]);
p
hy_check_media
(
port
,
0
);
schedule_delayed_work
(
&
port
->
mdio_thread
,
MDIO_INTERVAL
);
p
rintk
(
KERN_INFO
"%s: link up, speed %u Mb/s, %s duplex
\n
"
,
dev
->
name
,
port
->
speed
,
port
->
duplex
?
"full"
:
"half"
);
}
...
...
@@ -412,47 +422,13 @@ static inline void debug_desc(u32 phys, struct desc *desc)
#endif
}
static
inline
void
debug_queue
(
unsigned
int
queue
,
int
is_get
,
u32
phys
)
{
#if DEBUG_QUEUES
static
struct
{
int
queue
;
char
*
name
;
}
names
[]
=
{
{
TX_QUEUE
(
0x10
),
"TX#0 "
},
{
TX_QUEUE
(
0x20
),
"TX#1 "
},
{
TX_QUEUE
(
0x00
),
"TX#2 "
},
{
RXFREE_QUEUE
(
0x10
),
"RX-free#0 "
},
{
RXFREE_QUEUE
(
0x20
),
"RX-free#1 "
},
{
RXFREE_QUEUE
(
0x00
),
"RX-free#2 "
},
{
TXDONE_QUEUE
,
"TX-done "
},
};
int
i
;
for
(
i
=
0
;
i
<
ARRAY_SIZE
(
names
);
i
++
)
if
(
names
[
i
].
queue
==
queue
)
break
;
printk
(
KERN_DEBUG
"Queue %i %s%s %X
\n
"
,
queue
,
i
<
ARRAY_SIZE
(
names
)
?
names
[
i
].
name
:
""
,
is_get
?
"->"
:
"<-"
,
phys
);
#endif
}
static
inline
u32
queue_get_entry
(
unsigned
int
queue
)
{
u32
phys
=
qmgr_get_entry
(
queue
);
debug_queue
(
queue
,
1
,
phys
);
return
phys
;
}
static
inline
int
queue_get_desc
(
unsigned
int
queue
,
struct
port
*
port
,
int
is_tx
)
{
u32
phys
,
tab_phys
,
n_desc
;
struct
desc
*
tab
;
if
(
!
(
phys
=
q
ueue
_get_entry
(
queue
)))
if
(
!
(
phys
=
q
mgr
_get_entry
(
queue
)))
return
-
1
;
phys
&=
~
0x1F
;
/* mask out non-address bits */
...
...
@@ -468,7 +444,6 @@ static inline int queue_get_desc(unsigned int queue, struct port *port,
static
inline
void
queue_put_desc
(
unsigned
int
queue
,
u32
phys
,
struct
desc
*
desc
)
{
debug_queue
(
queue
,
0
,
phys
);
debug_desc
(
phys
,
desc
);
BUG_ON
(
phys
&
0x1F
);
qmgr_put_entry
(
queue
,
phys
);
...
...
@@ -562,7 +537,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
#endif
if
(
!
skb
)
{
port
->
stat
.
rx_dropped
++
;
dev
->
stats
.
rx_dropped
++
;
/* put the desc back on RX-ready queue */
desc
->
buf_len
=
MAX_MRU
;
desc
->
pkt_len
=
0
;
...
...
@@ -588,8 +563,8 @@ static int eth_poll(struct napi_struct *napi, int budget)
debug_pkt
(
dev
,
"eth_poll"
,
skb
->
data
,
skb
->
len
);
skb
->
protocol
=
eth_type_trans
(
skb
,
dev
);
port
->
stat
.
rx_packets
++
;
port
->
stat
.
rx_bytes
+=
skb
->
len
;
dev
->
stats
.
rx_packets
++
;
dev
->
stats
.
rx_bytes
+=
skb
->
len
;
netif_receive_skb
(
skb
);
/* put the new buffer on RX-free queue */
...
...
@@ -617,7 +592,7 @@ static void eth_txdone_irq(void *unused)
#if DEBUG_TX
printk
(
KERN_DEBUG
DRV_NAME
": eth_txdone_irq
\n
"
);
#endif
while
((
phys
=
q
ueue
_get_entry
(
TXDONE_QUEUE
))
!=
0
)
{
while
((
phys
=
q
mgr
_get_entry
(
TXDONE_QUEUE
))
!=
0
)
{
u32
npe_id
,
n_desc
;
struct
port
*
port
;
struct
desc
*
desc
;
...
...
@@ -634,8 +609,8 @@ static void eth_txdone_irq(void *unused)
debug_desc
(
phys
,
desc
);
if
(
port
->
tx_buff_tab
[
n_desc
])
{
/* not the draining packet */
port
->
stat
.
tx_packets
++
;
port
->
stat
.
tx_bytes
+=
desc
->
pkt_len
;
port
->
netdev
->
stats
.
tx_packets
++
;
port
->
netdev
->
stats
.
tx_bytes
+=
desc
->
pkt_len
;
dma_unmap_tx
(
port
,
desc
);
#if DEBUG_TX
...
...
@@ -673,7 +648,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
if
(
unlikely
(
skb
->
len
>
MAX_MRU
))
{
dev_kfree_skb
(
skb
);
port
->
stat
.
tx_errors
++
;
dev
->
stats
.
tx_errors
++
;
return
NETDEV_TX_OK
;
}
...
...
@@ -689,7 +664,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
bytes
=
ALIGN
(
offset
+
len
,
4
);
if
(
!
(
mem
=
kmalloc
(
bytes
,
GFP_ATOMIC
)))
{
dev_kfree_skb
(
skb
);
port
->
stat
.
tx_dropped
++
;
dev
->
stats
.
tx_dropped
++
;
return
NETDEV_TX_OK
;
}
memcpy_swab32
(
mem
,
(
u32
*
)((
int
)
skb
->
data
&
~
3
),
bytes
/
4
);
...
...
@@ -703,7 +678,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
#else
kfree
(
mem
);
#endif
port
->
stat
.
tx_dropped
++
;
dev
->
stats
.
tx_dropped
++
;
return
NETDEV_TX_OK
;
}
...
...
@@ -746,12 +721,6 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
}
static
struct
net_device_stats
*
eth_stats
(
struct
net_device
*
dev
)
{
struct
port
*
port
=
netdev_priv
(
dev
);
return
&
port
->
stat
;
}
static
void
eth_set_mcast_list
(
struct
net_device
*
dev
)
{
struct
port
*
port
=
netdev_priv
(
dev
);
...
...
@@ -785,41 +754,80 @@ static void eth_set_mcast_list(struct net_device *dev)
static
int
eth_ioctl
(
struct
net_device
*
dev
,
struct
ifreq
*
req
,
int
cmd
)
{
struct
port
*
port
=
netdev_priv
(
dev
);
unsigned
int
duplex_chg
;
int
err
;
if
(
!
netif_running
(
dev
))
return
-
EINVAL
;
err
=
generic_mii_ioctl
(
&
port
->
mii
,
if_mii
(
req
),
cmd
,
&
duplex_chg
);
if
(
duplex_chg
)
eth_set_duplex
(
port
);
return
err
;
return
phy_mii_ioctl
(
port
->
phydev
,
if_mii
(
req
),
cmd
);
}
/* ethtool support */
static
void
ixp4xx_get_drvinfo
(
struct
net_device
*
dev
,
struct
ethtool_drvinfo
*
info
)
{
struct
port
*
port
=
netdev_priv
(
dev
);
strcpy
(
info
->
driver
,
DRV_NAME
);
snprintf
(
info
->
fw_version
,
sizeof
(
info
->
fw_version
),
"%u:%u:%u:%u"
,
port
->
firmware
[
0
],
port
->
firmware
[
1
],
port
->
firmware
[
2
],
port
->
firmware
[
3
]);
strcpy
(
info
->
bus_info
,
"internal"
);
}
static
int
ixp4xx_get_settings
(
struct
net_device
*
dev
,
struct
ethtool_cmd
*
cmd
)
{
struct
port
*
port
=
netdev_priv
(
dev
);
return
phy_ethtool_gset
(
port
->
phydev
,
cmd
);
}
static
int
ixp4xx_set_settings
(
struct
net_device
*
dev
,
struct
ethtool_cmd
*
cmd
)
{
struct
port
*
port
=
netdev_priv
(
dev
);
return
phy_ethtool_sset
(
port
->
phydev
,
cmd
);
}
static
int
ixp4xx_nway_reset
(
struct
net_device
*
dev
)
{
struct
port
*
port
=
netdev_priv
(
dev
);
return
phy_start_aneg
(
port
->
phydev
);
}
static
struct
ethtool_ops
ixp4xx_ethtool_ops
=
{
.
get_drvinfo
=
ixp4xx_get_drvinfo
,
.
get_settings
=
ixp4xx_get_settings
,
.
set_settings
=
ixp4xx_set_settings
,
.
nway_reset
=
ixp4xx_nway_reset
,
.
get_link
=
ethtool_op_get_link
,
};
static
int
request_queues
(
struct
port
*
port
)
{
int
err
;
err
=
qmgr_request_queue
(
RXFREE_QUEUE
(
port
->
id
),
RX_DESCS
,
0
,
0
);
err
=
qmgr_request_queue
(
RXFREE_QUEUE
(
port
->
id
),
RX_DESCS
,
0
,
0
,
"%s:RX-free"
,
port
->
netdev
->
name
);
if
(
err
)
return
err
;
err
=
qmgr_request_queue
(
port
->
plat
->
rxq
,
RX_DESCS
,
0
,
0
);
err
=
qmgr_request_queue
(
port
->
plat
->
rxq
,
RX_DESCS
,
0
,
0
,
"%s:RX"
,
port
->
netdev
->
name
);
if
(
err
)
goto
rel_rxfree
;
err
=
qmgr_request_queue
(
TX_QUEUE
(
port
->
id
),
TX_DESCS
,
0
,
0
);
err
=
qmgr_request_queue
(
TX_QUEUE
(
port
->
id
),
TX_DESCS
,
0
,
0
,
"%s:TX"
,
port
->
netdev
->
name
);
if
(
err
)
goto
rel_rx
;
err
=
qmgr_request_queue
(
port
->
plat
->
txreadyq
,
TX_DESCS
,
0
,
0
);
err
=
qmgr_request_queue
(
port
->
plat
->
txreadyq
,
TX_DESCS
,
0
,
0
,
"%s:TX-ready"
,
port
->
netdev
->
name
);
if
(
err
)
goto
rel_tx
;
/* TX-done queue handles skbs sent out by the NPEs */
if
(
!
ports_open
)
{
err
=
qmgr_request_queue
(
TXDONE_QUEUE
,
TXDONE_QUEUE_LEN
,
0
,
0
);
err
=
qmgr_request_queue
(
TXDONE_QUEUE
,
TXDONE_QUEUE_LEN
,
0
,
0
,
"%s:TX-done"
,
DRV_NAME
);
if
(
err
)
goto
rel_txready
;
}
...
...
@@ -943,10 +951,12 @@ static int eth_open(struct net_device *dev)
npe_name
(
npe
));
return
-
EIO
;
}
port
->
firmware
[
0
]
=
msg
.
byte4
;
port
->
firmware
[
1
]
=
msg
.
byte5
;
port
->
firmware
[
2
]
=
msg
.
byte6
;
port
->
firmware
[
3
]
=
msg
.
byte7
;
}
mdio_write
(
dev
,
port
->
plat
->
phy
,
MII_BMCR
,
port
->
mii_bmcr
);
memset
(
&
msg
,
0
,
sizeof
(
msg
));
msg
.
cmd
=
NPE_VLAN_SETRXQOSENTRY
;
msg
.
eth_id
=
port
->
id
;
...
...
@@ -984,6 +994,9 @@ static int eth_open(struct net_device *dev)
return
err
;
}
port
->
speed
=
0
;
/* force "link up" message */
phy_start
(
port
->
phydev
);
for
(
i
=
0
;
i
<
ETH_ALEN
;
i
++
)
__raw_writel
(
dev
->
dev_addr
[
i
],
&
port
->
regs
->
hw_addr
[
i
]);
__raw_writel
(
0x08
,
&
port
->
regs
->
random_seed
);
...
...
@@ -1011,10 +1024,8 @@ static int eth_open(struct net_device *dev)
__raw_writel
(
DEFAULT_RX_CNTRL0
,
&
port
->
regs
->
rx_control
[
0
]);
napi_enable
(
&
port
->
napi
);
phy_check_media
(
port
,
1
);
eth_set_mcast_list
(
dev
);
netif_start_queue
(
dev
);
schedule_delayed_work
(
&
port
->
mdio_thread
,
MDIO_INTERVAL
);
qmgr_set_irq
(
port
->
plat
->
rxq
,
QUEUE_IRQ_SRC_NOT_EMPTY
,
eth_rx_irq
,
dev
);
...
...
@@ -1105,25 +1116,31 @@ static int eth_close(struct net_device *dev)
printk
(
KERN_CRIT
"%s: unable to disable loopback
\n
"
,
dev
->
name
);
port
->
mii_bmcr
=
mdio_read
(
dev
,
port
->
plat
->
phy
,
MII_BMCR
)
&
~
(
BMCR_RESET
|
BMCR_PDOWN
);
/* may have been altered */
mdio_write
(
dev
,
port
->
plat
->
phy
,
MII_BMCR
,
port
->
mii_bmcr
|
BMCR_PDOWN
);
phy_stop
(
port
->
phydev
);
if
(
!
ports_open
)
qmgr_disable_irq
(
TXDONE_QUEUE
);
cancel_rearming_delayed_work
(
&
port
->
mdio_thread
);
destroy_queues
(
port
);
release_queues
(
port
);
return
0
;
}
static
const
struct
net_device_ops
ixp4xx_netdev_ops
=
{
.
ndo_open
=
eth_open
,
.
ndo_stop
=
eth_close
,
.
ndo_start_xmit
=
eth_xmit
,
.
ndo_set_multicast_list
=
eth_set_mcast_list
,
.
ndo_do_ioctl
=
eth_ioctl
,
};
static
int
__devinit
eth_init_one
(
struct
platform_device
*
pdev
)
{
struct
port
*
port
;
struct
net_device
*
dev
;
struct
eth_plat_info
*
plat
=
pdev
->
dev
.
platform_data
;
u32
regs_phys
;
char
phy_id
[
BUS_ID_SIZE
];
int
err
;
if
(
!
(
dev
=
alloc_etherdev
(
sizeof
(
struct
port
))))
...
...
@@ -1152,12 +1169,8 @@ static int __devinit eth_init_one(struct platform_device *pdev)
goto
err_free
;
}
dev
->
open
=
eth_open
;
dev
->
hard_start_xmit
=
eth_xmit
;
dev
->
stop
=
eth_close
;
dev
->
get_stats
=
eth_stats
;
dev
->
do_ioctl
=
eth_ioctl
;
dev
->
set_multicast_list
=
eth_set_mcast_list
;
dev
->
netdev_ops
=
&
ixp4xx_netdev_ops
;
dev
->
ethtool_ops
=
&
ixp4xx_ethtool_ops
;
dev
->
tx_queue_len
=
100
;
netif_napi_add
(
dev
,
&
port
->
napi
,
eth_poll
,
NAPI_WEIGHT
);
...
...
@@ -1190,22 +1203,19 @@ static int __devinit eth_init_one(struct platform_device *pdev)
__raw_writel
(
DEFAULT_CORE_CNTRL
,
&
port
->
regs
->
core_control
);
udelay
(
50
);
port
->
mii
.
dev
=
dev
;
port
->
mii
.
mdio_read
=
mdio_read
;
port
->
mii
.
mdio_write
=
mdio_write
;
port
->
mii
.
phy_id
=
plat
->
phy
;
port
->
mii
.
phy_id_mask
=
0x1F
;
port
->
mii
.
reg_num_mask
=
0x1F
;
snprintf
(
phy_id
,
BUS_ID_SIZE
,
PHY_ID_FMT
,
"0"
,
plat
->
phy
);
port
->
phydev
=
phy_connect
(
dev
,
phy_id
,
&
ixp4xx_adjust_link
,
0
,
PHY_INTERFACE_MODE_MII
);
if
(
IS_ERR
(
port
->
phydev
))
{
printk
(
KERN_ERR
"%s: Could not attach to PHY
\n
"
,
dev
->
name
);
return
PTR_ERR
(
port
->
phydev
);
}
port
->
phydev
->
irq
=
PHY_POLL
;
printk
(
KERN_INFO
"%s: MII PHY %i on %s
\n
"
,
dev
->
name
,
plat
->
phy
,
npe_name
(
port
->
npe
));
phy_reset
(
dev
,
plat
->
phy
);
port
->
mii_bmcr
=
mdio_read
(
dev
,
plat
->
phy
,
MII_BMCR
)
&
~
(
BMCR_RESET
|
BMCR_PDOWN
);
mdio_write
(
dev
,
plat
->
phy
,
MII_BMCR
,
port
->
mii_bmcr
|
BMCR_PDOWN
);
INIT_DELAYED_WORK
(
&
port
->
mdio_thread
,
mdio_thread
);
return
0
;
err_unreg:
...
...
@@ -1231,7 +1241,7 @@ static int __devexit eth_remove_one(struct platform_device *pdev)
return
0
;
}
static
struct
platform_driver
drv
=
{
static
struct
platform_driver
ixp4xx_eth_driver
=
{
.
driver
.
name
=
DRV_NAME
,
.
probe
=
eth_init_one
,
.
remove
=
eth_remove_one
,
...
...
@@ -1239,20 +1249,19 @@ static struct platform_driver drv = {
static
int
__init
eth_init_module
(
void
)
{
int
err
;
if
(
!
(
ixp4xx_read_feature_bits
()
&
IXP4XX_FEATURE_NPEB_ETH0
))
return
-
ENOSYS
;
/* All MII PHY accesses use NPE-B Ethernet registers */
spin_lock_init
(
&
mdio_lock
);
mdio_regs
=
(
struct
eth_regs
__iomem
*
)
IXP4XX_EthB_BASE_VIRT
;
__raw_writel
(
DEFAULT_CORE_CNTRL
,
&
mdio_regs
->
core_control
);
return
platform_driver_register
(
&
drv
);
if
((
err
=
ixp4xx_mdio_register
()))
return
err
;
return
platform_driver_register
(
&
ixp4xx_eth_driver
);
}
static
void
__exit
eth_cleanup_module
(
void
)
{
platform_driver_unregister
(
&
drv
);
platform_driver_unregister
(
&
ixp4xx_eth_driver
);
ixp4xx_mdio_remove
();
}
MODULE_AUTHOR
(
"Krzysztof Halasa"
);
...
...
drivers/net/wan/Kconfig
View file @
e74b3f7d
...
...
@@ -335,6 +335,13 @@ config DSCC4_PCI_RST
Say Y if your card supports this feature.
config IXP4XX_HSS
tristate "Intel IXP4xx HSS (synchronous serial port) support"
depends on HDLC && ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
help
Say Y here if you want to use built-in HSS ports
on IXP4xx processor.
config DLCI
tristate "Frame Relay DLCI support"
---help---
...
...
drivers/net/wan/Makefile
View file @
e74b3f7d
...
...
@@ -41,6 +41,7 @@ obj-$(CONFIG_C101) += c101.o
obj-$(CONFIG_WANXL)
+=
wanxl.o
obj-$(CONFIG_PCI200SYN)
+=
pci200syn.o
obj-$(CONFIG_PC300TOO)
+=
pc300too.o
obj-$(CONFIG_IXP4XX_HSS)
+=
ixp4xx_hss.o
clean-files
:=
wanxlfw.inc
$(obj)/wanxl.o
:
$(obj)/wanxlfw.inc
...
...
drivers/net/wan/hdlc_ppp.c
View file @
e74b3f7d
...
...
@@ -303,7 +303,7 @@ static int cp_table[EVENTS][STATES] = {
STA: RTR must supply id
SCJ: RUC must supply CP packet len and data */
static
void
ppp_cp_event
(
struct
net_device
*
dev
,
u16
pid
,
u16
event
,
u8
code
,
u8
id
,
unsigned
int
len
,
void
*
data
)
u8
id
,
unsigned
int
len
,
const
void
*
data
)
{
int
old_state
,
action
;
struct
ppp
*
ppp
=
get_ppp
(
dev
);
...
...
@@ -374,11 +374,12 @@ static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
static
void
ppp_cp_parse_cr
(
struct
net_device
*
dev
,
u16
pid
,
u8
id
,
unsigned
int
len
,
u8
*
data
)
unsigned
int
req_len
,
const
u8
*
data
)
{
static
u8
const
valid_accm
[
6
]
=
{
LCP_OPTION_ACCM
,
6
,
0
,
0
,
0
,
0
};
u8
*
opt
,
*
out
;
unsigned
int
nak_len
=
0
,
rej_len
=
0
;
const
u8
*
opt
;
u8
*
out
;
unsigned
int
len
=
req_len
,
nak_len
=
0
,
rej_len
=
0
;
if
(
!
(
out
=
kmalloc
(
len
,
GFP_ATOMIC
)))
{
dev
->
stats
.
rx_dropped
++
;
...
...
@@ -423,7 +424,7 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
else
if
(
nak_len
)
ppp_cp_event
(
dev
,
pid
,
RCR_BAD
,
CP_CONF_NAK
,
id
,
nak_len
,
out
);
else
ppp_cp_event
(
dev
,
pid
,
RCR_GOOD
,
CP_CONF_ACK
,
id
,
len
,
data
);
ppp_cp_event
(
dev
,
pid
,
RCR_GOOD
,
CP_CONF_ACK
,
id
,
req_
len
,
data
);
kfree
(
out
);
}
...
...
drivers/net/wan/ixp4xx_hss.c
0 → 100644
View file @
e74b3f7d
/*
* Intel IXP4xx HSS (synchronous serial port) driver for Linux
*
* Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*/
#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/fs.h>
#include <linux/hdlc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <mach/npe.h>
#include <mach/qmgr.h>
#define DEBUG_DESC 0
#define DEBUG_RX 0
#define DEBUG_TX 0
#define DEBUG_PKT_BYTES 0
#define DEBUG_CLOSE 0
#define DRV_NAME "ixp4xx_hss"
#define PKT_EXTRA_FLAGS 0
/* orig 1 */
#define PKT_NUM_PIPES 1
/* 1, 2 or 4 */
#define PKT_PIPE_FIFO_SIZEW 4
/* total 4 dwords per HSS */
#define RX_DESCS 16
/* also length of all RX queues */
#define TX_DESCS 16
/* also length of all TX queues */
/* One coherent pool allocation holds every RX and TX descriptor for a port */
#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define RX_SIZE			(HDLC_MAX_MRU + 4) /* NPE needs more space */
#define MAX_CLOSE_WAIT		1000 /* microseconds */
#define HSS_COUNT		2
#define FRAME_SIZE		256 /* doesn't matter at this point */
#define FRAME_OFFSET		0
#define MAX_CHANNELS		(FRAME_SIZE / 8)

#define NAPI_WEIGHT		16

/* Queue IDs */
#define HSS0_CHL_RXTRIG_QUEUE	12	/* orig size = 32 dwords */
#define HSS0_PKT_RX_QUEUE	13	/* orig size = 32 dwords */
#define HSS0_PKT_TX0_QUEUE	14	/* orig size = 16 dwords */
#define HSS0_PKT_TX1_QUEUE	15
#define HSS0_PKT_TX2_QUEUE	16
#define HSS0_PKT_TX3_QUEUE	17
#define HSS0_PKT_RXFREE0_QUEUE	18	/* orig size = 16 dwords */
#define HSS0_PKT_RXFREE1_QUEUE	19
#define HSS0_PKT_RXFREE2_QUEUE	20
#define HSS0_PKT_RXFREE3_QUEUE	21
#define HSS0_PKT_TXDONE_QUEUE	22	/* orig size = 64 dwords */

#define HSS1_CHL_RXTRIG_QUEUE	10
#define HSS1_PKT_RX_QUEUE	0
#define HSS1_PKT_TX0_QUEUE	5
#define HSS1_PKT_TX1_QUEUE	6
#define HSS1_PKT_TX2_QUEUE	7
#define HSS1_PKT_TX3_QUEUE	8
#define HSS1_PKT_RXFREE0_QUEUE	1
#define HSS1_PKT_RXFREE1_QUEUE	2
#define HSS1_PKT_RXFREE2_QUEUE	3
#define HSS1_PKT_RXFREE3_QUEUE	4
#define HSS1_PKT_TXDONE_QUEUE	9

/* NPE packet modes (written with PKT_PIPE_MODE_WRITE) */
#define NPE_PKT_MODE_HDLC		0
#define NPE_PKT_MODE_RAW		1
#define NPE_PKT_MODE_56KMODE		2
#define NPE_PKT_MODE_56KENDIAN_MSB	4

/* PKT_PIPE_HDLC_CFG_WRITE flags */
#define PKT_HDLC_IDLE_ONES		0x1 /* default = flags */
#define PKT_HDLC_CRC_32			0x2 /* default = CRC-16 */
#define PKT_HDLC_MSB_ENDIAN		0x4 /* default = LE */


/* hss_config, PCRs */
/* Frame sync sampling, default = active low */
#define PCR_FRM_SYNC_ACTIVE_HIGH	0x40000000
#define PCR_FRM_SYNC_FALLINGEDGE	0x80000000
#define PCR_FRM_SYNC_RISINGEDGE		0xC0000000

/* Frame sync pin: input (default) or output generated off a given clk edge */
#define PCR_FRM_SYNC_OUTPUT_FALLING	0x20000000
#define PCR_FRM_SYNC_OUTPUT_RISING	0x30000000

/* Frame and data clock sampling on edge, default = falling */
#define PCR_FCLK_EDGE_RISING		0x08000000
#define PCR_DCLK_EDGE_RISING		0x04000000

/* Clock direction, default = input */
#define PCR_SYNC_CLK_DIR_OUTPUT		0x02000000

/* Generate/Receive frame pulses, default = enabled */
#define PCR_FRM_PULSE_DISABLED		0x01000000

/* Data rate is full (default) or half the configured clk speed */
#define PCR_HALF_CLK_RATE		0x00200000

/* Invert data between NPE and HSS FIFOs? (default = no) */
#define PCR_DATA_POLARITY_INVERT	0x00100000

/* TX/RX endianness, default = LSB */
#define PCR_MSB_ENDIAN			0x00080000

/* Normal (default) / open drain mode (TX only) */
#define PCR_TX_PINS_OPEN_DRAIN		0x00040000

/* No framing bit transmitted and expected on RX? (default = framing bit) */
#define PCR_SOF_NO_FBIT			0x00020000

/* Drive data pins? */
#define PCR_TX_DATA_ENABLE		0x00010000

/* Voice 56k type: drive the data pins low (default), high, high Z */
#define PCR_TX_V56K_HIGH		0x00002000
#define PCR_TX_V56K_HIGH_IMP		0x00004000

/* Unassigned type: drive the data pins low (default), high, high Z */
#define PCR_TX_UNASS_HIGH		0x00000800
#define PCR_TX_UNASS_HIGH_IMP		0x00001000

/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
#define PCR_TX_FB_HIGH_IMP		0x00000400

/* 56k data endiannes - which bit unused: high (default) or low */
#define PCR_TX_56KE_BIT_0_UNUSED	0x00000200

/* 56k data transmission type: 32/8 bit data (default) or 56K data */
#define PCR_TX_56KS_56K_DATA		0x00000100

/* hss_config, cCR */
/* Number of packetized clients, default = 1 */
#define CCR_NPE_HFIFO_2_HDLC		0x04000000
#define CCR_NPE_HFIFO_3_OR_4HDLC	0x08000000

/* default = no loopback */
#define CCR_LOOPBACK			0x02000000

/* HSS number, default = 0 (first) */
#define CCR_SECOND_HSS			0x01000000


/* hss_config, clkCR: main:10, num:10, denom:12 */
#define CLK42X_SPEED_EXP	((0x3FF << 22) | (  2 << 12) |   15) /*65 KHz*/

#define CLK42X_SPEED_512KHZ	((  130 << 22) | (  2 << 12) |   15)
#define CLK42X_SPEED_1536KHZ	((   43 << 22) | ( 18 << 12) |   47)
#define CLK42X_SPEED_1544KHZ	((   43 << 22) | ( 33 << 12) |  192)
#define CLK42X_SPEED_2048KHZ	((   32 << 22) | ( 34 << 12) |   63)
#define CLK42X_SPEED_4096KHZ	((   16 << 22) | ( 34 << 12) |  127)
#define CLK42X_SPEED_8192KHZ	((    8 << 22) | ( 34 << 12) |  255)

#define CLK46X_SPEED_512KHZ	((  130 << 22) | ( 24 << 12) |  127)
#define CLK46X_SPEED_1536KHZ	((   43 << 22) | (152 << 12) |  383)
#define CLK46X_SPEED_1544KHZ	((   43 << 22) | ( 66 << 12) |  385)
#define CLK46X_SPEED_2048KHZ	((   32 << 22) | (280 << 12) |  511)
#define CLK46X_SPEED_4096KHZ	((   16 << 22) | (280 << 12) | 1023)
#define CLK46X_SPEED_8192KHZ	((    8 << 22) | (280 << 12) | 2047)


/* hss_config, LUT entries */
#define TDMMAP_UNASSIGNED	0
#define TDMMAP_HDLC		1	/* HDLC - packetized */
#define TDMMAP_VOICE56K		2	/* Voice56K - 7-bit channelized */
#define TDMMAP_VOICE64K		3	/* Voice64K - 8-bit channelized */

/* offsets into HSS config */
#define HSS_CONFIG_TX_PCR	0x00 /* port configuration registers */
#define HSS_CONFIG_RX_PCR	0x04
#define HSS_CONFIG_CORE_CR	0x08 /* loopback control, HSS# */
#define HSS_CONFIG_CLOCK_CR	0x0C /* clock generator control */
#define HSS_CONFIG_TX_FCR	0x10 /* frame configuration registers */
#define HSS_CONFIG_RX_FCR	0x14
#define HSS_CONFIG_TX_LUT	0x18 /* channel look-up tables */
#define HSS_CONFIG_RX_LUT	0x38


/* NPE command codes */
/* writes the ConfigWord value to the location specified by offset */
#define PORT_CONFIG_WRITE		0x40

/* triggers the NPE to load the contents of the configuration table */
#define PORT_CONFIG_LOAD		0x41

/* triggers the NPE to return an HssErrorReadResponse message */
#define PORT_ERROR_READ			0x42

/* triggers the NPE to reset internal status and enable the HssPacketized
   operation for the flow specified by pPipe */
#define PKT_PIPE_FLOW_ENABLE		0x50
#define PKT_PIPE_FLOW_DISABLE		0x51
#define PKT_NUM_PIPES_WRITE		0x52
#define PKT_PIPE_FIFO_SIZEW_WRITE	0x53
#define PKT_PIPE_HDLC_CFG_WRITE		0x54
#define PKT_PIPE_IDLE_PATTERN_WRITE	0x55
#define PKT_PIPE_RX_SIZE_WRITE		0x56
#define PKT_PIPE_MODE_WRITE		0x57

/* HDLC packet status values - desc->status */
#define ERR_SHUTDOWN		1 /* stop or shutdown occurrance */
#define ERR_HDLC_ALIGN		2 /* HDLC alignment error */
#define ERR_HDLC_FCS		3 /* HDLC Frame Check Sum error */
#define ERR_RXFREE_Q_EMPTY	4 /* RX-free queue became empty while receiving
				     this packet (if buf_len < pkt_len) */
#define ERR_HDLC_TOO_LONG	5 /* HDLC frame size too long */
#define ERR_HDLC_ABORT		6 /* abort sequence received */
#define ERR_DISCONNECTING	7 /* disconnect is in progress */
#ifdef __ARMEB__
typedef
struct
sk_buff
buffer_t
;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef
void
buffer_t
;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif
/* Per-HSS-port driver state; one instance per HSS interface (id 0 or 1). */
struct port {
	struct device *dev;		/* platform device, used for firmware load */
	struct npe *npe;		/* network processor handling this port */
	struct net_device *netdev;	/* the HDLC net device */
	struct napi_struct napi;	/* NAPI context for RX polling */
	struct hss_plat_info *plat;	/* board-specific hooks and queue IDs */
	/* shadow tables mapping descriptor index -> buffer, see buffer_t */
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;		/* bus address of desc_tab */
	unsigned int id;		/* HSS port number (0 or 1) */
	unsigned int clock_type, clock_rate, loopback;
	unsigned int initialized, carrier;
	u8 hdlc_cfg;			/* PKT_HDLC_* flags (CRC type etc.) */
};
/* NPE message structure.
 * The two #ifdef arms declare the same fields in mirrored byte order, so
 * the in-memory layout sent to the NPE is identical on big- and
 * little-endian kernels.  data8a..data8d / data16a..data16b / data32
 * overlay the same 32-bit payload word via the anonymous union. */
struct msg {
#ifdef __ARMEB__
	u8 cmd, unused, hss_port, index;
	union {
		struct { u8 data8a, data8b, data8c, data8d; };
		struct { u16 data16a, data16b; };
		struct { u32 data32; };
	};
#else
	u8 index, hss_port, unused, cmd;
	union {
		struct { u8 data8d, data8c, data8b, data8a; };
		struct { u16 data16b, data16a; };
		struct { u32 data32; };
	};
#endif
};
/* HDLC packet descriptor.
 * Shared with the NPE; the field order is mirrored per endianness (like
 * struct msg) so the hardware sees one fixed layout. */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 status;		/* 0 = OK, otherwise one of the ERR_* codes */
	u8 error_count;
	u16 __reserved;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 __reserved;
	u8 error_count;
	u8 status;		/* 0 = OK, otherwise one of the ERR_* codes */
#endif
	u32 __reserved1[4];
};
/* Descriptor table layout: RX_DESCS RX descriptors followed by TX_DESCS
 * TX descriptors.  These map an index to its bus address / kernel pointer. */
#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])
/*****************************************************************************
 * global variables
 ****************************************************************************/

/* number of currently opened HSS ports; guards dma_pool lifetime */
static int ports_open;
/* shared pool for per-port descriptor tables, created on first open */
static struct dma_pool *dma_pool;
/* serializes NPE message exchange and platform open/close callbacks */
static spinlock_t npe_lock;

/* Hardware queue numbers per HSS port, indexed by port->id */
static const struct {
	int tx, txdone, rx, rxfree;
} queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE,
		   HSS0_PKT_RX_QUEUE, HSS0_PKT_RXFREE0_QUEUE},
		  {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE,
		   HSS1_PKT_RX_QUEUE, HSS1_PKT_RXFREE0_QUEUE},
};
/*****************************************************************************
* utility functions
****************************************************************************/
/* Map an HDLC net_device back to its driver-private struct port. */
static inline struct port* dev_to_port(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}
#ifndef __ARMEB__
/* Copy cnt 32-bit words from src to dest, byte-swapping each word.
 * Only compiled on little-endian builds, where NPE data must be swapped. */
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	while (cnt-- > 0)
		*dest++ = swab32(*src++);
}
#endif
/*****************************************************************************
* HSS access
****************************************************************************/
/* Send one message to the port's NPE; a failed send is fatal (BUG). */
static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
{
	const u32 *words = (u32*)msg;

	if (!npe_send_message(port->npe, msg, what))
		return;

	printk(KERN_CRIT "HSS-%i: unable to send command [%08X:%08X]"
	       " to %s\n", port->id, words[0], words[1],
	       npe_name(port->npe));
	BUG();
}
/* Program the TX and RX channel look-up tables: every timeslot is mapped
 * to TDMMAP_HDLC.  Entries are 2 bits wide, packed 16 per 32-bit word;
 * the word is flushed to the NPE once 16 channels have been accumulated. */
static void hss_config_set_lut(struct port *port)
{
	struct msg msg;
	int ch;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;

	for (ch = 0; ch < MAX_CHANNELS; ch++) {
		/* shift in the next 2-bit entry from the top of data32 */
		msg.data32 >>= 2;
		msg.data32 |= TDMMAP_HDLC << 30;

		if (ch % 16 == 15) {
			/* one full 32-bit LUT word ready - write TX then RX */
			msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
			hss_npe_send(port, &msg, "HSS_SET_TX_LUT");

			msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
			hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
		}
	}
}
/* Write the full HSS port configuration to the NPE (port control
 * registers, core control, clock generator, frame registers, LUTs),
 * then tell the NPE to load it and verify the response.
 * A failed load is fatal (BUG). */
static void hss_config(struct port *port)
{
	struct msg msg;

	/* TX port configuration register */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_PCR;
	msg.data32 = PCR_FRM_SYNC_OUTPUT_RISING | PCR_MSB_ENDIAN |
		PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
	if (port->clock_type == CLOCK_INT)
		msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
	hss_npe_send(port, &msg, "HSS_SET_TX_PCR");

	/* RX PCR = TX PCR with data-enable cleared and DCLK edge flipped */
	msg.index = HSS_CONFIG_RX_PCR;
	msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
	hss_npe_send(port, &msg, "HSS_SET_RX_PCR");

	/* core control: loopback and HSS number selection */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CORE_CR;
	msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
		(port->id ? CCR_SECOND_HSS : 0);
	hss_npe_send(port, &msg, "HSS_SET_CORE_CR");

	/* clock generator - hard-wired to 2048 kHz for now */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CLOCK_CR;
	msg.data32 = CLK42X_SPEED_2048KHZ /* FIXME */;
	hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");

	/* TX frame configuration: offset and size (in bits, hence -1) */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_TX_FCR");

	/* RX frame configuration, same values as TX */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_RX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_RX_FCR");

	hss_config_set_lut(port);

	/* commit: ask the NPE to load the configuration table */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_LOAD;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");

	if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
	    /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
	    msg.cmd != PORT_CONFIG_LOAD || msg.data32) {
		printk(KERN_CRIT "HSS-%i: HSS_LOAD_CONFIG failed\n",
		       port->id);
		BUG();
	}

	/* HDLC may stop working without this - check FIXME */
	npe_recv_message(port->npe, &msg, "FLUSH_IT");
}
/* Push the port's HDLC configuration (CRC type etc., from port->hdlc_cfg)
 * to the NPE for both the RX and TX directions. */
static void hss_set_hdlc_cfg(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_HDLC_CFG_WRITE;
	msg.hss_port = port->id;
	msg.data8a = port->hdlc_cfg; /* rx_cfg */
	/* tx_cfg additionally carries PKT_EXTRA_FLAGS in the upper bits */
	msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
	hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
}
/* Query the NPE for the port's error/status word (PORT_ERROR_READ) and
 * return it.  A failed read is fatal (BUG). */
static u32 hss_get_status(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_ERROR_READ;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "PORT_ERROR_READ");
	if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
		printk(KERN_CRIT "HSS-%i: unable to read HSS status\n",
		       port->id);
		BUG();
	}

	return msg.data32;
}
/* Enable the packetized (HDLC) flow on this port's NPE pipe. */
static void hss_start_hdlc(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_ENABLE;
	msg.hss_port = port->id;
	msg.data32 = 0;
	hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");
}
/* Disable the packetized (HDLC) flow, then read status back so we know
 * the NPE has actually halted before the caller tears down the queues. */
static void hss_stop_hdlc(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_DISABLE;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
	hss_get_status(port); /* make sure it's halted */
}
/* One-time port initialization: load NPE firmware if the NPE is not yet
 * running, then write the static HDLC-mode parameters (pipe count, FIFO
 * size, packet mode, max RX size, idle pattern).
 * Returns 0 on success or a negative errno from firmware loading. */
static int hss_load_firmware(struct port *port)
{
	struct msg msg;
	int err;

	if (port->initialized)
		return 0;

	if (!npe_running(port->npe) &&
	    (err = npe_load_firmware(port->npe, npe_name(port->npe),
				     port->dev)))
		return err;

	/* HDLC mode configuration */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_NUM_PIPES_WRITE;
	msg.hss_port = port->id;
	msg.data8a = PKT_NUM_PIPES;
	hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");

	msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
	msg.data8a = PKT_PIPE_FIFO_SIZEW;
	hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");

	msg.cmd = PKT_PIPE_MODE_WRITE;
	msg.data8a = NPE_PKT_MODE_HDLC;
	/* msg.data8b = inv_mask */
	/* msg.data8c = or_mask */
	hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");

	msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
	msg.data16a = HDLC_MAX_MRU; /* including CRC */
	hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");

	msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
	msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
	hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");

	port->initialized = 1;
	return 0;
}
/*****************************************************************************
* packetized (HDLC) operation
****************************************************************************/
/* Hex-dump up to DEBUG_PKT_BYTES bytes of a packet to the kernel log.
 * Compiles away entirely when DEBUG_PKT_BYTES is 0. */
static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len);
	for (i = 0; i < len && i < DEBUG_PKT_BYTES; i++)
		printk("%s%02X", (i % 4) ? "" : " ", data[i]);
	printk("\n");
#endif
}
/* Log one descriptor (bus address plus all fields) when DEBUG_DESC is set;
 * otherwise compiles to nothing. */
static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->status, desc->error_count);
#endif
}
/* Pop one descriptor bus address from a hardware queue and convert it to
 * an index into the port's RX or TX descriptor table.
 * Returns the descriptor index, or -1 if the queue is empty.
 * BUGs on a misaligned or out-of-range address from the hardware. */
static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	/* descriptors are 32-byte aligned in the pool */
	BUG_ON(phys & 0x1F);
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next); /* "next" chaining is never used */
	return n_desc;
}
/* Push a descriptor's bus address onto a hardware queue.
 * Queues are sized to hold all descriptors, so overflow is a bug. */
static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F); /* must be 32-byte aligned */
	qmgr_put_entry(queue, phys);
	BUG_ON(qmgr_stat_overflow(queue));
}
/* Undo the DMA mapping of a TX descriptor's buffer.
 * On little-endian builds the buffer was mapped from a word-aligned
 * bounce buffer (see hss_hdlc_xmit), so the mapped base/length must be
 * recomputed the same way: align the address down and pad the length. */
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}
/* Platform callback: record DCD state and propagate it to the net stack.
 * In loopback mode the carrier indication is ignored (stays as-is).
 * Runs under npe_lock since it can race with open/close/ioctl paths. */
static void hss_hdlc_set_carrier(void *pdev, int carrier)
{
	struct net_device *netdev = pdev;
	struct port *port = dev_to_port(netdev);
	unsigned long flags;

	spin_lock_irqsave(&npe_lock, flags);
	port->carrier = carrier;
	if (!port->loopback) {
		if (carrier)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
	}
	spin_unlock_irqrestore(&npe_lock, flags);
}
/* RX queue interrupt: mask further RX IRQs and hand off to NAPI polling. */
static void hss_hdlc_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(queue_ids[port->id].rx);
	netif_rx_schedule(dev, &port->napi);
}
/* NAPI poll handler: drain up to @budget frames from the RX queue.
 *
 * For each descriptor popped from the RX queue:
 *  - status 0: allocate a replacement buffer.  Big-endian: map a fresh
 *    skb and hand the filled skb up zero-copy.  Little-endian: allocate
 *    an skb of exactly pkt_len and copy+byte-swap the data into it.
 *  - error statuses: bump the matching rx_* error counters.
 *  - in all cases the descriptor is recycled onto the RX-free queue.
 *
 * Returns the number of frames processed; returns < budget (after
 * netif_rx_complete and re-enabling the IRQ) only when the queue is
 * verifiably empty, re-polling if new entries raced in. */
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif
	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
			/* RX queue drained - complete NAPI, but re-check for
			 * entries that arrived before the IRQ was re-enabled */
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " netif_rx_complete\n", dev->name);
#endif
			netif_rx_complete(dev, napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    netif_rx_reschedule(dev, napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " netif_rx_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);
#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif
		skb = NULL;
		switch (desc->status) {
		case 0:
#ifdef __ARMEB__
			/* replacement skb for the slot; frame goes up as-is */
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE,
						      DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			/* skb to copy the (byte-swapped) frame into */
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:	/* FIXME - remove printk */
			printk(KERN_ERR "%s: hss_hdlc_poll: status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			/* put the desc back on RX-ready queue */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single(&dev->dev, desc->data,
				RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);
		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}
#if DEBUG_RX
	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
	return received;	/* not all work done */
}
/* TX-done queue interrupt: reclaim completed TX descriptors.
 * For each finished descriptor: update stats, unmap and free the buffer,
 * and return the descriptor to the TX-ready queue.  If TX-ready was
 * empty before the refill, the netdev queue may have been stopped by
 * hss_hdlc_xmit, so wake it. */
static void hss_hdlc_txdone_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);
	int n_desc;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
#endif
	while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
					port, 1)) >= 0) {
		struct desc *desc;
		int start;

		desc = tx_desc_ptr(port, n_desc);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->pkt_len;

		dma_unmap_tx(port, desc);
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
		       dev->name, port->tx_buff_tab[n_desc]);
#endif
		free_buffer_irq(port->tx_buff_tab[n_desc]);
		port->tx_buff_tab[n_desc] = NULL;

		/* remember whether TX-ready was empty before we refill it */
		start = qmgr_stat_empty(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, n_desc), desc);
		if (start) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
			       " ready\n", dev->name);
#endif
			netif_wake_queue(dev);
		}
	}
}
/* Transmit one HDLC frame.
 * Big-endian: the skb data is DMA-mapped directly and the skb is kept
 * until TX-done.  Little-endian: the frame is copied byte-swapped into a
 * word-aligned kmalloc bounce buffer and the skb is freed immediately.
 * The descriptor is taken from the TX-ready queue (guaranteed non-empty
 * while the netdev queue is running) and pushed onto the HW TX queue.
 * Always returns NETDEV_TX_OK; oversized or unmappable frames are
 * dropped with the appropriate stats bump. */
static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > HDLC_MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* copy from the aligned-down start so whole words are swapped */
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0); /* queue running implies a descriptor is available */
	desc = tx_desc_ptr(port, n);

	/* remember what to free in hss_hdlc_txdone_irq */
#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	wmb(); /* descriptor must be visible before the queue entry */
	queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
	dev->trans_start = jiffies;

	if (qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);
#endif
	return NETDEV_TX_OK;
}
/* Reserve all five hardware queues used by the port (RX-free, RX, TX,
 * TX-ready, TX-done).  On any failure the already-acquired queues are
 * released in reverse order via the goto chain.
 * Returns 0 or the qmgr error code. */
static int request_hdlc_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
				 "%s:TX-done", port->netdev->name);
	if (err)
		goto rel_txready;
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(queue_ids[port->id].tx);
rel_rx:
	qmgr_release_queue(queue_ids[port->id].rx);
rel_rxfree:
	qmgr_release_queue(queue_ids[port->id].rxfree);
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}
/* Release every hardware queue acquired by request_hdlc_queues(). */
static void release_hdlc_queues(struct port *port)
{
	qmgr_release_queue(queue_ids[port->id].rxfree);
	qmgr_release_queue(queue_ids[port->id].rx);
	qmgr_release_queue(queue_ids[port->id].txdone);
	qmgr_release_queue(queue_ids[port->id].tx);
	qmgr_release_queue(port->plat->txreadyq);
}
/* Allocate the port's descriptor table (from the shared DMA pool, which
 * is created on the first open) and one DMA-mapped buffer per RX
 * descriptor.  Returns 0, -ENOMEM or -EIO.  On partial failure, cleanup
 * is left to the caller's destroy_hdlc_queues() (see hss_hdlc_open). */
static int init_hdlc_queues(struct port *port)
{
	int i;

	if (!ports_open)
		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
						 POOL_ALLOC_SIZE, 32, 0)))
			return -ENOMEM;

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb (big-endian) or kmalloc buffer */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = RX_SIZE;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}
/* Free every RX/TX buffer still referenced by the descriptor tables
 * (safe after a partial init: only non-NULL entries are touched), return
 * the descriptor table to the DMA pool, and destroy the pool when the
 * last port has been closed. */
static void destroy_hdlc_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data, RX_SIZE,
						 DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}
/* net_device open: bring the HDLC port up.
 * Order: generic hdlc_open -> firmware -> hardware queues -> descriptor/
 * buffer setup -> platform open hook (under npe_lock) -> prime the
 * TX-ready and RX-free queues -> enable NAPI/IRQs -> configure and start
 * the NPE packet pipe.  Error paths unwind in reverse via the gotos. */
static int hss_hdlc_open(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, err = 0;

	if ((err = hdlc_open(dev)))
		return err;

	if ((err = hss_load_firmware(port)))
		goto err_hdlc_close;

	if ((err = request_hdlc_queues(port)))
		goto err_hdlc_close;

	if ((err = init_hdlc_queues(port)))
		goto err_destroy_queues;

	spin_lock_irqsave(&npe_lock, flags);
	if (port->plat->open)
		if ((err = port->plat->open(port->id, dev,
					    hss_hdlc_set_carrier)))
			goto err_unlock;
	spin_unlock_irqrestore(&npe_lock, flags);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(queue_ids[port->id].rxfree,
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	napi_enable(&port->napi);
	netif_start_queue(dev);

	qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_rx_irq, dev);

	qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_txdone_irq, dev);
	qmgr_enable_irq(queue_ids[port->id].txdone);

	ports_open++;

	hss_set_hdlc_cfg(port);
	hss_config(port);

	hss_start_hdlc(port);

	/* we may already have RX data, enables IRQ */
	netif_rx_schedule(dev, &port->napi);
	return 0;

err_unlock:
	spin_unlock_irqrestore(&npe_lock, flags);
err_destroy_queues:
	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
err_hdlc_close:
	hdlc_close(dev);
	return err;
}
/* net_device stop: shut the HDLC port down.
 * Stops NAPI/TX, disables the NPE packet pipe, then drains every
 * descriptor back out of the hardware queues, counting them against the
 * number handed in at open so leaks inside the NPE are reported.  TX
 * draining polls the TX-ready queue up to MAX_CLOSE_WAIT iterations to
 * let in-flight frames complete.  Finally frees buffers/queues and calls
 * the platform close hook. */
static int hss_hdlc_close(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, buffs = RX_DESCS; /* allocated RX buffers */

	spin_lock_irqsave(&npe_lock, flags);
	ports_open--;
	qmgr_disable_irq(queue_ids[port->id].rx);
	netif_stop_queue(dev);
	napi_disable(&port->napi);

	hss_stop_hdlc(port);

	while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
		buffs--;
	while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
		buffs--;

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);

	buffs = TX_DESCS;
	while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif
	qmgr_disable_irq(queue_ids[port->id].txdone);

	if (port->plat->close)
		port->plat->close(port->id, dev);
	spin_unlock_irqrestore(&npe_lock, flags);

	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
	hdlc_close(dev);
	return 0;
}
/*
 * hdlc_device attach callback: validate the requested line encoding and
 * frame-check sequence, and record the matching NPE HDLC configuration
 * in port->hdlc_cfg.  Returns 0 on success, -EINVAL on an unsupported
 * encoding/parity combination.
 */
static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct port *port = dev_to_port(dev);

	/* Only NRZ line encoding is supported. */
	if (encoding != ENCODING_NRZ)
		return -EINVAL;

	if (parity == PARITY_CRC16_PR1_CCITT) {
		port->hdlc_cfg = 0;
		return 0;
	}

	if (parity == PARITY_CRC32_PR1_CCITT) {
		port->hdlc_cfg = PKT_HDLC_CRC_32;
		return 0;
	}

	return -EINVAL;
}
/*
 * SIOCWANDEV ioctl handler: report (IF_GET_IFACE) or configure
 * (IF_IFACE_SYNC_SERIAL / IF_IFACE_V35) the synchronous-serial line
 * settings.  Everything else is forwarded to the generic HDLC layer.
 */
static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int clk;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		/* Report current settings to userspace. */
		ifr->ifr_settings.type = IF_IFACE_V35;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&new_line, 0, sizeof(new_line));
		new_line.clock_type = port->clock_type;
		new_line.clock_rate = 2048000; /* FIXME */
		new_line.loopback = port->loopback;
		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL:
	case IF_IFACE_V35:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		/* Let the platform veto/translate the clock selection. */
		clk = new_line.clock_type;
		if (port->plat->set_clock)
			clk = port->plat->set_clock(port->id, clk);

		if (clk != CLOCK_EXT && clk != CLOCK_INT)
			return -EINVAL;	/* No such clock setting */

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		port->clock_type = clk; /* Update settings */
		/* FIXME port->clock_rate = new_line.clock_rate */;
		port->loopback = new_line.loopback;

		spin_lock_irqsave(&npe_lock, flags);

		/* Reprogram the NPE only if the interface is running. */
		if (dev->flags & IFF_UP)
			hss_config(port);

		/* Loopback mode forces carrier on regardless of DCD. */
		if (port->loopback || port->carrier)
			netif_carrier_on(port->netdev);
		else
			netif_carrier_off(port->netdev);
		spin_unlock_irqrestore(&npe_lock, flags);

		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
/*****************************************************************************
* initialization
****************************************************************************/
/*
 * Platform probe: allocate per-port state, grab NPE-A, create the HDLC
 * net_device and register it.  On failure everything acquired so far is
 * unwound via the goto-cleanup chain.  Returns 0 or a negative errno.
 */
static int __devinit hss_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	hdlc_device *hdlc;
	int err;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (port == NULL)
		return -ENOMEM;

	/* HSS traffic is handled by NPE-A. */
	port->npe = npe_request(0);
	if (port->npe == NULL) {
		err = -ENOSYS;
		goto err_free;
	}

	dev = alloc_hdlcdev(port);
	port->netdev = dev;
	if (dev == NULL) {
		err = -ENOMEM;
		goto err_plat;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	hdlc = dev_to_hdlc(dev);
	hdlc->attach = hss_hdlc_attach;
	hdlc->xmit = hss_hdlc_xmit;
	dev->open = hss_hdlc_open;
	dev->stop = hss_hdlc_close;
	dev->do_ioctl = hss_hdlc_ioctl;
	dev->tx_queue_len = 100;
	/* Default: external clock at E1 rate. */
	port->clock_type = CLOCK_EXT;
	port->clock_rate = 2048000;
	port->id = pdev->id;
	port->dev = &pdev->dev;
	port->plat = pdev->dev.platform_data;
	netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);

	err = register_hdlc_device(dev);
	if (err)
		goto err_free_netdev;

	platform_set_drvdata(pdev, port);

	printk(KERN_INFO "%s: HSS-%i\n", dev->name, port->id);
	return 0;

err_free_netdev:
	free_netdev(dev);
err_plat:
	npe_release(port->npe);
err_free:
	kfree(port);
	return err;
}
/*
 * Platform remove: tear down one HSS port in the reverse order of
 * hss_init_one().  Always succeeds.
 */
static int __devexit hss_remove_one(struct platform_device *pdev)
{
	struct port *port;

	port = platform_get_drvdata(pdev);

	unregister_hdlc_device(port->netdev);
	free_netdev(port->netdev);
	npe_release(port->npe);
	platform_set_drvdata(pdev, NULL);
	kfree(port);
	return 0;
}
static
struct
platform_driver
ixp4xx_hss_driver
=
{
.
driver
.
name
=
DRV_NAME
,
.
probe
=
hss_init_one
,
.
remove
=
hss_remove_one
,
};
/*
 * Module init: refuse to load unless the CPU's feature fuses expose both
 * the HDLC coprocessor and the HSS ports, then register the driver.
 */
static int __init hss_init_module(void)
{
	const u32 needed = IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS;

	if ((ixp4xx_read_feature_bits() & needed) != needed)
		return -ENOSYS;

	spin_lock_init(&npe_lock);

	return platform_driver_register(&ixp4xx_hss_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit hss_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_hss_driver);
}
/* Module metadata and entry points. */
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_hss");
module_init(hss_init_module);
module_exit(hss_cleanup_module);
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment