Commit 281befa5 authored by Dan Williams

ioat2: kill pending flag

The pending == 2 case no longer exists in the driver, so we can use
ioat2_ring_pending() outside the lock to determine if there might be any
descriptors in the ring that the hardware has not seen.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent b372ec2d
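For context, ioat2_ring_pending() counts descriptors that have been written into the ring (head) but not yet pushed to the hardware (issued). A minimal sketch of such a helper, assuming the power-of-two ring implied by alloc_order in the struct below; this is an illustration, not necessarily the exact upstream definition:

static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
{
	return 1 << ioat->alloc_order;	/* number of ring entries, power of two */
}

/* descriptors allocated at 'head' that the hardware has not seen yet */
static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
{
	return (ioat->head - ioat->issued) & (ioat2_ring_size(ioat) - 1);
}

Assuming head and issued only advance under ring_lock (as the "called with ring_lock held" comment suggests), an unlocked read can at worst return a stale count; that only delays the DMACOUNT doorbell until the next submission or issue_pending call, which is why the check before taking the lock in ioat2_issue_pending below is safe.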
@@ -51,48 +51,40 @@ MODULE_PARM_DESC(ioat_ring_max_alloc_order,
 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
 {
-	void * __iomem reg_base = ioat->base.reg_base;
+	struct ioat_chan_common *chan = &ioat->base;

-	ioat->pending = 0;
 	ioat->dmacount += ioat2_ring_pending(ioat);
 	ioat->issued = ioat->head;
 	/* make descriptor updates globally visible before notifying channel */
 	wmb();
-	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-	dev_dbg(to_dev(&ioat->base),
+	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+	dev_dbg(to_dev(chan),
 		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
 		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
 }

-void ioat2_issue_pending(struct dma_chan *chan)
+void ioat2_issue_pending(struct dma_chan *c)
 {
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

-	spin_lock_bh(&ioat->ring_lock);
-	if (ioat->pending == 1)
+	if (ioat2_ring_pending(ioat)) {
+		spin_lock_bh(&ioat->ring_lock);
 		__ioat2_issue_pending(ioat);
-	spin_unlock_bh(&ioat->ring_lock);
+		spin_unlock_bh(&ioat->ring_lock);
+	}
 }

 /**
  * ioat2_update_pending - log pending descriptors
  * @ioat: ioat2+ channel
  *
- * set pending to '1' unless pending is already set to '2', pending == 2
- * indicates that submission is temporarily blocked due to an in-flight
- * reset. If we are already above the ioat_pending_level threshold then
- * just issue pending.
- *
- * called with ring_lock held
+ * Check if the number of unsubmitted descriptors has exceeded the
+ * watermark. Called with ring_lock held
  */
 static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
 {
-	if (unlikely(ioat->pending == 2))
-		return;
-	else if (ioat2_ring_pending(ioat) > ioat_pending_level)
+	if (ioat2_ring_pending(ioat) > ioat_pending_level)
 		__ioat2_issue_pending(ioat);
-	else
-		ioat->pending = 1;
 }

 static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
@@ -546,7 +538,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	ioat->head = 0;
 	ioat->issued = 0;
 	ioat->tail = 0;
-	ioat->pending = 0;
 	ioat->alloc_order = order;
 	spin_unlock_bh(&ioat->ring_lock);
@@ -815,7 +806,6 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 	chan->last_completion = 0;
 	chan->completion_dma = 0;
-	ioat->pending = 0;
 	ioat->dmacount = 0;
 }
...
@@ -47,7 +47,6 @@ extern int ioat_ring_alloc_order;
  * @head: allocated index
  * @issued: hardware notification point
  * @tail: cleanup index
- * @pending: lock free indicator for issued != head
  * @dmacount: identical to 'head' except for occasionally resetting to zero
  * @alloc_order: log2 of the number of allocated descriptors
  * @ring: software ring buffer implementation of hardware ring
@@ -61,7 +60,6 @@ struct ioat2_dma_chan {
 	u16 tail;
 	u16 dmacount;
 	u16 alloc_order;
-	int pending;
 	struct ioat_ring_ent **ring;
 	spinlock_t ring_lock;
 };