Commit a6a9fb85 authored by Krzysztof Hałasa

IXP4xx: Add support for the second half of the 64 hardware queues.

Signed-off-by: Krzysztof Hałasa <khc@pm.waw.pl>
parent 1406de8e
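
With this change a driver may claim any of the 64 hardware queues. For queues 32-63 the interrupt source is hard-wired to "not nearly empty" and only the nearly-empty (statne_h) and full (statf_h) status bits exist, as the hunks below show. A minimal, hypothetical sketch of a consumer using a queue from the new upper half (the queue number 36 and the my_rx_setup/my_rx_handler/process_entry names are purely illustrative and not part of this commit; the queue is assumed to have been requested beforehand via __qmgr_request_queue(), which now accepts any queue number below QUEUES):

	/* Hypothetical consumer of a queue in the new upper half (32-63). */
	#define MY_RXQ	36

	static void my_rx_handler(void *pdev)
	{
		/* Drain until the queue reports nearly (or completely) empty.
		 * qmgr_stat_empty() and qmgr_stat_nearly_full() still BUG()
		 * for queues above 31; qmgr_stat_nearly_empty() works for all.
		 */
		while (!qmgr_stat_nearly_empty(MY_RXQ))
			process_entry(qmgr_get_entry(MY_RXQ));
	}

	static void my_rx_setup(void *pdev)
	{
		/* For queues 32-63 the IRQ source is fixed: passing anything
		 * other than QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY now hits a BUG().
		 */
		qmgr_set_irq(MY_RXQ, QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY,
			     my_rx_handler, pdev);
		qmgr_enable_irq(MY_RXQ);
	}
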
@@ -15,7 +15,7 @@
#define DEBUG_QMGR 0
#define HALF_QUEUES 32
#define QUEUES 64 /* only 32 lower queues currently supported */
#define QUEUES 64
#define MAX_QUEUE_LENGTH 4 /* in dwords */
#define QUEUE_STAT1_EMPTY 1 /* queue status bits */
@@ -110,48 +110,95 @@ static inline u32 qmgr_get_entry(unsigned int queue)
return val;
}
static inline int qmgr_get_stat1(unsigned int queue)
static inline int __qmgr_get_stat1(unsigned int queue)
{
extern struct qmgr_regs __iomem *qmgr_regs;
return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
>> ((queue & 7) << 2)) & 0xF;
}
static inline int qmgr_get_stat2(unsigned int queue)
static inline int __qmgr_get_stat2(unsigned int queue)
{
extern struct qmgr_regs __iomem *qmgr_regs;
BUG_ON(queue >= HALF_QUEUES);
return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
>> ((queue & 0xF) << 1)) & 0x3;
}
/**
* qmgr_stat_empty() - checks if a hardware queue is empty
* @queue: queue number
*
* Returns non-zero value if the queue is empty.
*/
static inline int qmgr_stat_empty(unsigned int queue)
{
return !!(qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY);
BUG_ON(queue >= HALF_QUEUES);
return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY;
}
/**
* qmgr_stat_nearly_empty() - checks if a hardware queue is nearly empty
* @queue: queue number
*
* Returns non-zero value if the queue is nearly or completely empty.
*/
static inline int qmgr_stat_nearly_empty(unsigned int queue)
{
return !!(qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY);
extern struct qmgr_regs __iomem *qmgr_regs;
if (queue >= HALF_QUEUES)
return (__raw_readl(&qmgr_regs->statne_h) >>
(queue - HALF_QUEUES)) & 0x01;
return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY;
}
/**
* qmgr_stat_nearly_full() - checks if a hardware queue is nearly full
* @queue: queue number
*
* Returns non-zero value if the queue is nearly or completely full.
*/
static inline int qmgr_stat_nearly_full(unsigned int queue)
{
return !!(qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_FULL);
BUG_ON(queue >= HALF_QUEUES);
return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_FULL;
}
/**
* qmgr_stat_full() - checks if a hardware queue is full
* @queue: queue number
*
* Returns non-zero value if the queue is full.
*/
static inline int qmgr_stat_full(unsigned int queue)
{
return !!(qmgr_get_stat1(queue) & QUEUE_STAT1_FULL);
extern struct qmgr_regs __iomem *qmgr_regs;
if (queue >= HALF_QUEUES)
return (__raw_readl(&qmgr_regs->statf_h) >>
(queue - HALF_QUEUES)) & 0x01;
return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL;
}
/**
* qmgr_stat_underflow() - checks if a hardware queue experienced underflow
* @queue: queue number
*
* Returns non-zero value if the queue experienced an underflow.
*/
static inline int qmgr_stat_underflow(unsigned int queue)
{
return !!(qmgr_get_stat2(queue) & QUEUE_STAT2_UNDERFLOW);
return __qmgr_get_stat2(queue) & QUEUE_STAT2_UNDERFLOW;
}
/**
* qmgr_stat_overflow() - checks if a hardware queue experienced overflow
* @queue: queue number
*
* Returns non-zero value if the queue experienced an overflow.
*/
static inline int qmgr_stat_overflow(unsigned int queue)
{
return !!(qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW);
return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW;
}
#endif
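
To make the new status layout concrete: queues 0-31 keep a 4-bit status nibble in stat1[], so queue 13 is read from stat1[13 >> 3] = stat1[1] at bit offset (13 & 7) * 4 = 20; queues 32-63 get a single bit per queue, so the nearly-empty flag of queue 40 is bit 40 - 32 = 8 of statne_h and its full flag is bit 8 of statf_h. The remaining stat1/stat2-based checks (empty, nearly full, underflow, overflow) stay limited to queues 0-31 and now BUG() on higher numbers.
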
@@ -18,8 +18,8 @@ struct qmgr_regs __iomem *qmgr_regs;
static struct resource *mem_res;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[HALF_QUEUES])(void *pdev);
static void *irq_pdevs[HALF_QUEUES];
static void (*irq_handlers[QUEUES])(void *pdev);
static void *irq_pdevs[QUEUES];
#if DEBUG_QMGR
char qmgr_queue_descs[QUEUES][32];
@@ -28,29 +28,38 @@ char qmgr_queue_descs[QUEUES][32];
void qmgr_set_irq(unsigned int queue, int src,
void (*handler)(void *pdev), void *pdev)
{
u32 __iomem *reg = &qmgr_regs->irqsrc[queue / 8]; /* 8 queues / u32 */
int bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
unsigned long flags;
src &= 7;
spin_lock_irqsave(&qmgr_lock, flags);
__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit), reg);
if (queue < HALF_QUEUES) {
u32 __iomem *reg;
int bit;
BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
reg);
} else
/* IRQ source for queues 32-63 is fixed */
BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);
irq_handlers[queue] = handler;
irq_pdevs[queue] = pdev;
spin_unlock_irqrestore(&qmgr_lock, flags);
}
static irqreturn_t qmgr_irq1(int irq, void *pdev)
static irqreturn_t qmgr_irq(int irq, void *pdev)
{
int i;
u32 val = __raw_readl(&qmgr_regs->irqstat[0]);
__raw_writel(val, &qmgr_regs->irqstat[0]); /* ACK */
int i, half = (irq == IRQ_IXP4XX_QM1 ? 0 : 1);
u32 val = __raw_readl(&qmgr_regs->irqstat[half]);
__raw_writel(val, &qmgr_regs->irqstat[half]); /* ACK */
for (i = 0; i < HALF_QUEUES; i++)
if (val & (1 << i))
irq_handlers[i](irq_pdevs[i]);
if (val & (1 << i)) {
int irq = half * HALF_QUEUES + i;
irq_handlers[irq](irq_pdevs[irq]);
}
return val ? IRQ_HANDLED : 0;
}
@@ -58,21 +67,25 @@ static irqreturn_t qmgr_irq1(int irq, void *pdev)
void qmgr_enable_irq(unsigned int queue)
{
unsigned long flags;
int half = queue / 32;
u32 mask = 1 << (queue & (HALF_QUEUES - 1));
spin_lock_irqsave(&qmgr_lock, flags);
__raw_writel(__raw_readl(&qmgr_regs->irqen[0]) | (1 << queue),
&qmgr_regs->irqen[0]);
__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
&qmgr_regs->irqen[half]);
spin_unlock_irqrestore(&qmgr_lock, flags);
}
void qmgr_disable_irq(unsigned int queue)
{
unsigned long flags;
int half = queue / 32;
u32 mask = 1 << (queue & (HALF_QUEUES - 1));
spin_lock_irqsave(&qmgr_lock, flags);
__raw_writel(__raw_readl(&qmgr_regs->irqen[0]) & ~(1 << queue),
&qmgr_regs->irqen[0]);
__raw_writel(1 << queue, &qmgr_regs->irqstat[0]); /* clear */
__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
&qmgr_regs->irqen[half]);
__raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
spin_unlock_irqrestore(&qmgr_lock, flags);
}
@@ -98,8 +111,7 @@ int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
int err;
if (queue >= HALF_QUEUES)
return -ERANGE;
BUG_ON(queue >= QUEUES);
if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
return -EINVAL;
@@ -180,7 +192,7 @@ void qmgr_release_queue(unsigned int queue)
{
u32 cfg, addr, mask[4];
BUG_ON(queue >= HALF_QUEUES); /* not in valid range */
BUG_ON(queue >= QUEUES); /* not in valid range */
spin_lock_irq(&qmgr_lock);
cfg = __raw_readl(&qmgr_regs->sram[queue]);
@@ -247,10 +259,13 @@ static int qmgr_init(void)
__raw_writel(0, &qmgr_regs->irqen[i]);
}
__raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
__raw_writel(0, &qmgr_regs->statf_h);
for (i = 0; i < QUEUES; i++)
__raw_writel(0, &qmgr_regs->sram[i]);
err = request_irq(IRQ_IXP4XX_QM1, qmgr_irq1, 0,
err = request_irq(IRQ_IXP4XX_QM1, qmgr_irq, 0,
"IXP4xx Queue Manager", NULL);
if (err) {
printk(KERN_ERR "qmgr: failed to request IRQ%i\n",
@@ -258,12 +273,22 @@ static int qmgr_init(void)
goto error_irq;
}
err = request_irq(IRQ_IXP4XX_QM2, qmgr_irq, 0,
"IXP4xx Queue Manager", NULL);
if (err) {
printk(KERN_ERR "qmgr: failed to request IRQ%i\n",
IRQ_IXP4XX_QM2);
goto error_irq2;
}
used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
spin_lock_init(&qmgr_lock);
printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
return 0;
error_irq2:
free_irq(IRQ_IXP4XX_QM1, NULL);
error_irq:
iounmap(qmgr_regs);
error_map:
@@ -274,7 +299,9 @@ error_map:
static void qmgr_remove(void)
{
free_irq(IRQ_IXP4XX_QM1, NULL);
free_irq(IRQ_IXP4XX_QM2, NULL);
synchronize_irq(IRQ_IXP4XX_QM1);
synchronize_irq(IRQ_IXP4XX_QM2);
iounmap(qmgr_regs);
release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}