Commit e12c4fa3 authored by Dan Williams

Merge branch 'ioat' into dmaengine

parents a348a7e6 4b652f0d
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_DMATEST) += dmatest.o
-obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
+obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o
......
obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
ioatdma-objs := pci.o dma.o dma_v2.o dca.o
@@ -33,8 +33,8 @@
#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
#endif
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "dma.h"
#include "registers.h"
/*
* Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
@@ -242,7 +242,8 @@ static struct dca_ops ioat_dca_ops = {
};
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider * __devinit
+ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
struct dca_provider *dca;
struct ioat_dca_priv *ioatdca;
@@ -407,7 +408,8 @@ static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
return slots;
}
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider * __devinit
+ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
struct dca_provider *dca;
struct ioat_dca_priv *ioatdca;
@@ -602,7 +604,8 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
return slots;
}
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider * __devinit
+ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
struct dca_provider *dca;
struct ioat_dca_priv *ioatdca;
......
/*
* Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef IOATDMA_H
#define IOATDMA_H
#include <linux/dmaengine.h>
#include "hw.h"
#include "registers.h"
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <net/tcp.h>
#define IOAT_DMA_VERSION "3.64"
#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
#define IOAT_DMA_DCA_ANY_CPU ~0
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
/*
* workaround for IOAT ver.3.0 null descriptor issue
* (channel returns error when size is 0)
*/
#define NULL_DESC_BUFFER_SIZE 1
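/*
 * Illustrative sketch (hypothetical helper, not from this patch): how a
 * submit path might honor the workaround above when it queues a null
 * descriptor, using the ctl_f bit-field layout from hw.h.
 */
static inline void ioat_fill_null_desc(struct ioat_dma_descriptor *hw)
{
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* a zero size faults on v3 hardware, so use the 1-byte dummy size */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
}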
/**
* struct ioatdma_device - internal representation of an IOAT device
* @pdev: PCI-Express device
* @reg_base: MMIO register space base address
* @dma_pool: for allocating DMA descriptors
* @common: embedded struct dma_device
* @version: version of ioatdma device
* @msix_entries: irq handlers
* @idx: per channel data
* @dca: direct cache access context
* @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
* @enumerate_channels: hw version specific channel enumeration
*/
struct ioatdma_device {
struct pci_dev *pdev;
void __iomem *reg_base;
struct pci_pool *dma_pool;
struct pci_pool *completion_pool;
struct dma_device common;
u8 version;
struct msix_entry msix_entries[4];
struct ioat_chan_common *idx[4];
struct dca_provider *dca;
void (*intr_quirk)(struct ioatdma_device *device);
int (*enumerate_channels)(struct ioatdma_device *device);
};
struct ioat_chan_common {
struct dma_chan common;
void __iomem *reg_base;
unsigned long last_completion;
spinlock_t cleanup_lock;
dma_cookie_t completed_cookie;
unsigned long state;
#define IOAT_COMPLETION_PENDING 0
#define IOAT_COMPLETION_ACK 1
#define IOAT_RESET_PENDING 2
struct timer_list timer;
#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
#define IDLE_TIMEOUT msecs_to_jiffies(2000)
#define RESET_DELAY msecs_to_jiffies(100)
struct ioatdma_device *device;
dma_addr_t completion_dma;
u64 *completion;
struct tasklet_struct cleanup_task;
};
/**
* struct ioat_dma_chan - internal representation of a DMA channel
*/
struct ioat_dma_chan {
struct ioat_chan_common base;
size_t xfercap; /* XFERCAP register value expanded out */
spinlock_t desc_lock;
struct list_head free_desc;
struct list_head used_desc;
int pending;
u16 desccount;
};
static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
{
return container_of(c, struct ioat_chan_common, common);
}
static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
{
struct ioat_chan_common *chan = to_chan_common(c);
return container_of(chan, struct ioat_dma_chan, base);
}
/**
* ioat_is_complete - poll the status of an ioat transaction
* @c: channel handle
* @cookie: transaction identifier
* @done: if set, updated with last completed transaction
* @used: if set, updated with last used transaction
*/
static inline enum dma_status
ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
dma_cookie_t *done, dma_cookie_t *used)
{
struct ioat_chan_common *chan = to_chan_common(c);
dma_cookie_t last_used;
dma_cookie_t last_complete;
last_used = c->cookie;
last_complete = chan->completed_cookie;
if (done)
*done = last_complete;
if (used)
*used = last_used;
return dma_async_is_complete(cookie, last_complete, last_used);
}
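/*
 * Illustrative caller (not from this patch): a dmaengine client typically
 * kicks the channel and then polls the cookie through this helper until
 * the status leaves DMA_IN_PROGRESS. The busy-wait is for demonstration;
 * real callers do useful work between polls.
 */
static inline void ioat_poll_cookie(struct dma_chan *c, dma_cookie_t cookie)
{
	dma_cookie_t done, used;

	dma_async_issue_pending(c);
	while (ioat_is_complete(c, cookie, &done, &used) == DMA_IN_PROGRESS)
		cpu_relax();
}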
/* wrapper around hardware descriptor format + additional software fields */
/**
* struct ioat_desc_sw - wrapper around hardware descriptor
* @hw: hardware DMA descriptor
* @node: this descriptor will either be on the free list,
* or attached to a transaction list (async_tx.tx_list)
* @txd: the generic software descriptor for all engines
* @id: identifier for debug
*/
struct ioat_desc_sw {
struct ioat_dma_descriptor *hw;
struct list_head node;
size_t len;
struct dma_async_tx_descriptor txd;
#ifdef DEBUG
int id;
#endif
};
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif
static inline void
__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
struct dma_async_tx_descriptor *tx, int id)
{
struct device *dev = to_dev(chan);
dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
" ctl: %#x (op: %d int_en: %d compl: %d)\n", id,
(unsigned long long) tx->phys,
(unsigned long long) hw->next, tx->cookie, tx->flags,
hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}
#define dump_desc_dbg(c, d) \
({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
{
#ifdef CONFIG_NET_DMA
sysctl_tcp_dma_copybreak = copybreak;
#endif
}
static inline struct ioat_chan_common *
ioat_chan_by_index(struct ioatdma_device *device, int index)
{
return device->idx[index];
}
static inline u64 ioat_chansts(struct ioat_chan_common *chan)
{
u8 ver = chan->device->version;
u64 status;
u32 status_lo;
/* We need to read the low address first as this causes the
* chipset to latch the upper bits for the subsequent read
*/
status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
status <<= 32;
status |= status_lo;
return status;
}
static inline void ioat_start(struct ioat_chan_common *chan)
{
u8 ver = chan->device->version;
writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}
static inline u64 ioat_chansts_to_addr(u64 status)
{
return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}
static inline u32 ioat_chanerr(struct ioat_chan_common *chan)
{
return readl(chan->reg_base + IOAT_CHANERR_OFFSET);
}
static inline void ioat_suspend(struct ioat_chan_common *chan)
{
u8 ver = chan->device->version;
writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}
static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
{
struct ioat_chan_common *chan = &ioat->base;
writel(addr & 0x00000000FFFFFFFF,
chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
writel(addr >> 32,
chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
}
static inline bool is_ioat_active(unsigned long status)
{
return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}
static inline bool is_ioat_idle(unsigned long status)
{
return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}
static inline bool is_ioat_halted(unsigned long status)
{
return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}
static inline bool is_ioat_suspended(unsigned long status)
{
return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}
/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR|
IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR|
IOAT_CHANERR_LENGTH_ERR));
}
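/*
 * Illustrative use (the real call sites are in dma.c/dma_v2.c, collapsed in
 * this view): when a channel halts, the cleanup path reads CHANERR and
 * treats any of the bits above as a driver programming bug rather than a
 * recoverable hardware fault.
 */
static inline void ioat_check_halt(struct ioat_chan_common *chan)
{
	if (is_ioat_halted(ioat_chansts(chan)))
		BUG_ON(is_ioat_bug(ioat_chanerr(chan)));
}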
int __devinit ioat_probe(struct ioatdma_device *device);
int __devinit ioat_register(struct ioatdma_device *device);
int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca);
void __devexit ioat_dma_remove(struct ioatdma_device *device);
struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
void __iomem *iobase);
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx,
void (*timer_fn)(unsigned long),
void (*tasklet)(unsigned long),
unsigned long ioat);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
unsigned long *phys_complete);
#endif /* IOATDMA_H */
/*
* Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef IOATDMA_V2_H
#define IOATDMA_V2_H
#include <linux/dmaengine.h>
#include "dma.h"
#include "hw.h"
extern int ioat_pending_level;
/*
* workaround for IOAT ver.3.0 null descriptor issue
* (channel returns error when size is 0)
*/
#define NULL_DESC_BUFFER_SIZE 1
#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
/**
 * struct ioat2_dma_chan - ioat v2 / v3 channel attributes
 * @base: common ioat channel parameters
 * @xfercap_log: log2 of channel max transfer length (for fast division)
* @head: allocated index
* @issued: hardware notification point
* @tail: cleanup index
* @pending: lock-free indicator for issued != head
* @dmacount: identical to 'head' except for occasionally resetting to zero
* @alloc_order: log2 of the number of allocated descriptors
* @ring: software ring buffer implementation of hardware ring
* @ring_lock: protects ring attributes
*/
struct ioat2_dma_chan {
struct ioat_chan_common base;
size_t xfercap_log;
u16 head;
u16 issued;
u16 tail;
u16 dmacount;
u16 alloc_order;
int pending;
struct ioat_ring_ent **ring;
spinlock_t ring_lock;
};
static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
{
struct ioat_chan_common *chan = to_chan_common(c);
return container_of(chan, struct ioat2_dma_chan, base);
}
static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat)
{
return (1 << ioat->alloc_order) - 1;
}
/* count of descriptors in flight with the engine */
static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
{
return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat);
}
/* count of descriptors pending submission to hardware */
static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
{
return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat);
}
static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
{
u16 num_descs = ioat2_ring_mask(ioat) + 1;
u16 active = ioat2_ring_active(ioat);
BUG_ON(active > num_descs);
return num_descs - active;
}
/* assumes caller already checked space */
static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
{
ioat->head += len;
return ioat->head - len;
}
static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
{
u16 num_descs = len >> ioat->xfercap_log;
num_descs += !!(len & ((1 << ioat->xfercap_log) - 1));
return num_descs;
}
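/*
 * Worked example (illustrative values, not from this patch): with
 * alloc_order == 4 the ring has 16 entries and the mask is 0xf. The u16
 * indices wrap modulo 2^16, and the mask keeps the math right across the
 * wrap, e.g. head == 0x0003 after wrapping while tail == 0xfffe:
 *
 *	active = (0x0003 - 0xfffe) & 0xf = 5	(descriptors in flight)
 *	space  = 16 - 5 = 11			(slots free for new submits)
 *
 * Likewise ioat2_xferlen_to_descs() is a ceiling division by the transfer
 * cap: a request of xfercap + 1 bytes needs 2 descriptors, one full-sized
 * plus one for the single remaining byte.
 */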
struct ioat_ring_ent {
struct ioat_dma_descriptor *hw;
struct dma_async_tx_descriptor txd;
size_t len;
#ifdef DEBUG
int id;
#endif
};
static inline struct ioat_ring_ent *
ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
{
return ioat->ring[idx & ioat2_ring_mask(ioat)];
}
static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
{
struct ioat_chan_common *chan = &ioat->base;
writel(addr & 0x00000000FFFFFFFF,
chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
writel(addr >> 32,
chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
#endif /* IOATDMA_V2_H */
@@ -23,6 +23,7 @@
/* PCI Configuration Space Values */
#define IOAT_PCI_VID 0x8086
+#define IOAT_MMIO_BAR 0
/* CB device ID's */
#define IOAT_PCI_DID_5000 0x1A38
@@ -39,32 +40,34 @@
struct ioat_dma_descriptor {
uint32_t size;
-uint32_t ctl;
+union {
+uint32_t ctl;
+struct {
+unsigned int int_en:1;
+unsigned int src_snoop_dis:1;
+unsigned int dest_snoop_dis:1;
+unsigned int compl_write:1;
+unsigned int fence:1;
+unsigned int null:1;
+unsigned int src_brk:1;
+unsigned int dest_brk:1;
+unsigned int bundle:1;
+unsigned int dest_dca:1;
+unsigned int hint:1;
+unsigned int rsvd2:13;
+unsigned int op:8;
+} ctl_f;
+};
uint64_t src_addr;
uint64_t dst_addr;
uint64_t next;
uint64_t rsv1;
uint64_t rsv2;
-uint64_t user1;
+/* store some driver data in an unused portion of the descriptor */
+union {
+uint64_t user1;
+uint64_t tx_cnt;
+};
uint64_t user2;
};
-#define IOAT_DMA_DESCRIPTOR_CTL_INT_GN 0x00000001
-#define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN 0x00000002
-#define IOAT_DMA_DESCRIPTOR_CTL_DST_SN 0x00000004
-#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008
-#define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010
-#define IOAT_DMA_DESCRIPTOR_NUL 0x00000020
-#define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040
-#define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080
-#define IOAT_DMA_DESCRIPTOR_CTL_BNDL 0x00000100
-#define IOAT_DMA_DESCRIPTOR_CTL_DCA 0x00000200
-#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT 0x00000400
-#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000
-#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA 0x00000000
-#define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA 0x00000001
-#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK 0xFF000000
#endif
@@ -30,9 +30,10 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dca.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"
MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("GPL");
@@ -60,14 +61,7 @@ static struct pci_device_id ioat_pci_tbl[] = {
{ 0, }
};
-struct ioat_device {
-struct pci_dev *pdev;
-void __iomem *iobase;
-struct ioatdma_device *dma;
-struct dca_provider *dca;
-};
-static int __devinit ioat_probe(struct pci_dev *pdev,
+static int __devinit ioat_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
static void __devexit ioat_remove(struct pci_dev *pdev);
@@ -75,104 +69,93 @@ static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
+#define DRV_NAME "ioatdma"
static struct pci_driver ioat_pci_driver = {
.name = "ioatdma",
.name = DRV_NAME,
.id_table = ioat_pci_tbl,
-.probe = ioat_probe,
+.probe = ioat_pci_probe,
.remove = __devexit_p(ioat_remove),
};
-static int __devinit ioat_probe(struct pci_dev *pdev,
-const struct pci_device_id *id)
+static struct ioatdma_device *
+alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
+{
+struct device *dev = &pdev->dev;
+struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+if (!d)
+return NULL;
+d->pdev = pdev;
+d->reg_base = iobase;
+return d;
+}
+static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
-void __iomem *iobase;
-struct ioat_device *device;
-unsigned long mmio_start, mmio_len;
+void __iomem * const *iomap;
+struct device *dev = &pdev->dev;
+struct ioatdma_device *device;
int err;
-err = pci_enable_device(pdev);
+err = pcim_enable_device(pdev);
if (err)
-goto err_enable_device;
+return err;
-err = pci_request_regions(pdev, ioat_pci_driver.name);
+err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
if (err)
-goto err_request_regions;
+return err;
+iomap = pcim_iomap_table(pdev);
+if (!iomap)
+return -ENOMEM;
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err)
-goto err_set_dma_mask;
+return err;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err)
-goto err_set_dma_mask;
-mmio_start = pci_resource_start(pdev, 0);
-mmio_len = pci_resource_len(pdev, 0);
-iobase = ioremap(mmio_start, mmio_len);
-if (!iobase) {
-err = -ENOMEM;
-goto err_ioremap;
-}
+return err;
-device = kzalloc(sizeof(*device), GFP_KERNEL);
-if (!device) {
-err = -ENOMEM;
-goto err_kzalloc;
-}
-device->pdev = pdev;
-pci_set_drvdata(pdev, device);
-device->iobase = iobase;
+device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
+if (!device)
+return -ENOMEM;
pci_set_master(pdev);
-switch (readb(iobase + IOAT_VER_OFFSET)) {
-case IOAT_VER_1_2:
-device->dma = ioat_dma_probe(pdev, iobase);
-if (device->dma && ioat_dca_enabled)
-device->dca = ioat_dca_init(pdev, iobase);
-break;
-case IOAT_VER_2_0:
-device->dma = ioat_dma_probe(pdev, iobase);
-if (device->dma && ioat_dca_enabled)
-device->dca = ioat2_dca_init(pdev, iobase);
-break;
-case IOAT_VER_3_0:
-device->dma = ioat_dma_probe(pdev, iobase);
-if (device->dma && ioat_dca_enabled)
-device->dca = ioat3_dca_init(pdev, iobase);
-break;
-default:
-err = -ENODEV;
-break;
-}
-if (!device->dma)
-err = -ENODEV;
+device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
+if (!device)
+return -ENOMEM;
+pci_set_drvdata(pdev, device);
-if (err)
-goto err_version;
+device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+if (device->version == IOAT_VER_1_2)
+err = ioat1_dma_probe(device, ioat_dca_enabled);
+else if (device->version == IOAT_VER_2_0)
+err = ioat2_dma_probe(device, ioat_dca_enabled);
+else if (device->version >= IOAT_VER_3_0)
+err = ioat3_dma_probe(device, ioat_dca_enabled);
+else
+return -ENODEV;
+if (err) {
+dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+return -ENODEV;
+}
return 0;
-err_version:
-kfree(device);
-err_kzalloc:
-iounmap(iobase);
-err_ioremap:
-err_set_dma_mask:
-pci_release_regions(pdev);
-pci_disable_device(pdev);
-err_request_regions:
-err_enable_device:
-return err;
}
static void __devexit ioat_remove(struct pci_dev *pdev)
{
-struct ioat_device *device = pci_get_drvdata(pdev);
+struct ioatdma_device *device = pci_get_drvdata(pdev);
if (!device)
return;
dev_err(&pdev->dev, "Removing dma and dca services\n");
if (device->dca) {
@@ -180,13 +163,7 @@ static void __devexit ioat_remove(struct pci_dev *pdev)
free_dca_provider(device->dca);
device->dca = NULL;
}
-if (device->dma) {
-ioat_dma_remove(device->dma);
-device->dma = NULL;
-}
-kfree(device);
+ioat_dma_remove(device);
}
static int __init ioat_init_module(void)
......
@@ -75,7 +75,11 @@
#define IOAT_CHANCTRL_ERR_INT_EN 0x0010
#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008
#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
-#define IOAT_CHANCTRL_INT_DISABLE 0x0001
+#define IOAT_CHANCTRL_INT_REARM 0x0001
+#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\
+IOAT_CHANCTRL_ERR_COMPLETION_EN |\
+IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\
+IOAT_CHANCTRL_ERR_INT_EN)
#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
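/*
 * Illustrative use of the new mask (not part of this hunk): bit 0 is named
 * for what writing 1 does (re-arm the channel interrupt), and a driver can
 * put a channel into its normal operating mode with a single store:
 *
 *	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
 */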
@@ -94,14 +98,14 @@
#define IOAT2_CHANSTS_OFFSET_HIGH 0x0C
#define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \
? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH)
-#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F
-#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010
-#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x0000000000000008
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3
+#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL)
+#define IOAT_CHANSTS_SOFT_ERR 0x10ULL
+#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL
+#define IOAT_CHANSTS_STATUS 0x7ULL
+#define IOAT_CHANSTS_ACTIVE 0x0
+#define IOAT_CHANSTS_DONE 0x1
+#define IOAT_CHANSTS_SUSPENDED 0x2
+#define IOAT_CHANSTS_HALTED 0x3
@@ -204,18 +208,18 @@
#define IOAT_CDAR_OFFSET_HIGH 0x24
#define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */
-#define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR 0x0001
-#define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR 0x0002
-#define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR 0x0004
-#define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR 0x0008
+#define IOAT_CHANERR_SRC_ADDR_ERR 0x0001
+#define IOAT_CHANERR_DEST_ADDR_ERR 0x0002
+#define IOAT_CHANERR_NEXT_ADDR_ERR 0x0004
+#define IOAT_CHANERR_NEXT_DESC_ALIGN_ERR 0x0008
#define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010
#define IOAT_CHANERR_CHANCMD_ERR 0x0020
#define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040
#define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080
#define IOAT_CHANERR_READ_DATA_ERR 0x0100
#define IOAT_CHANERR_WRITE_DATA_ERR 0x0200
-#define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR 0x0400
-#define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR 0x0800
+#define IOAT_CHANERR_CONTROL_ERR 0x0400
+#define IOAT_CHANERR_LENGTH_ERR 0x0800
#define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000
#define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000
#define IOAT_CHANERR_SOFT_ERR 0x4000
......
/*
* Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef IOATDMA_H
#define IOATDMA_H
#include <linux/dmaengine.h>
#include "ioatdma_hw.h"
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <net/tcp.h>
#define IOAT_DMA_VERSION "3.64"
enum ioat_interrupt {
none = 0,
msix_multi_vector = 1,
msix_single_vector = 2,
msi = 3,
intx = 4,
};
#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
#define IOAT_DMA_DCA_ANY_CPU ~0
#define IOAT_WATCHDOG_PERIOD (2 * HZ)
/**
* struct ioatdma_device - internal representation of an IOAT device
* @pdev: PCI-Express device
* @reg_base: MMIO register space base address
* @dma_pool: for allocating DMA descriptors
* @common: embedded struct dma_device
* @version: version of ioatdma device
* @irq_mode: which style irq to use
* @msix_entries: irq handlers
* @idx: per channel data
*/
struct ioatdma_device {
struct pci_dev *pdev;
void __iomem *reg_base;
struct pci_pool *dma_pool;
struct pci_pool *completion_pool;
struct dma_device common;
u8 version;
enum ioat_interrupt irq_mode;
struct delayed_work work;
struct msix_entry msix_entries[4];
struct ioat_dma_chan *idx[4];
};
/**
* struct ioat_dma_chan - internal representation of a DMA channel
*/
struct ioat_dma_chan {
void __iomem *reg_base;
dma_cookie_t completed_cookie;
unsigned long last_completion;
unsigned long last_completion_time;
size_t xfercap; /* XFERCAP register value expanded out */
spinlock_t cleanup_lock;
spinlock_t desc_lock;
struct list_head free_desc;
struct list_head used_desc;
unsigned long watchdog_completion;
int watchdog_tcp_cookie;
u32 watchdog_last_tcp_cookie;
struct delayed_work work;
int pending;
int dmacount;
int desccount;
struct ioatdma_device *device;
struct dma_chan common;
dma_addr_t completion_addr;
union {
u64 full; /* HW completion writeback */
struct {
u32 low;
u32 high;
};
} *completion_virt;
unsigned long last_compl_desc_addr_hw;
struct tasklet_struct cleanup_task;
};
/* wrapper around hardware descriptor format + additional software fields */
/**
* struct ioat_desc_sw - wrapper around hardware descriptor
* @hw: hardware DMA descriptor
* @node: this descriptor will either be on the free list,
* or attached to a transaction list (async_tx.tx_list)
* @tx_cnt: number of descriptors required to complete the transaction
* @async_tx: the generic software descriptor for all engines
*/
struct ioat_desc_sw {
struct ioat_dma_descriptor *hw;
struct list_head node;
int tx_cnt;
size_t len;
dma_addr_t src;
dma_addr_t dst;
struct dma_async_tx_descriptor async_tx;
};
static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
{
#ifdef CONFIG_NET_DMA
switch (dev->version) {
case IOAT_VER_1_2:
sysctl_tcp_dma_copybreak = 4096;
break;
case IOAT_VER_2_0:
sysctl_tcp_dma_copybreak = 2048;
break;
case IOAT_VER_3_0:
sysctl_tcp_dma_copybreak = 262144;
break;
}
#endif
}
#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE)
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
void __iomem *iobase);
void ioat_dma_remove(struct ioatdma_device *device);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
#else
#define ioat_dma_probe(pdev, iobase) NULL
#define ioat_dma_remove(device) do { } while (0)
#define ioat_dca_init(pdev, iobase) NULL
#define ioat2_dca_init(pdev, iobase) NULL
#define ioat3_dca_init(pdev, iobase) NULL
#endif
#endif /* IOATDMA_H */
@@ -183,6 +183,11 @@ dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
iov_byte_offset,
kdata,
copy);
+/* poll for a descriptor slot */
+if (unlikely(dma_cookie < 0)) {
+dma_async_issue_pending(chan);
+continue;
+}
len -= copy;
iov[iovec_idx].iov_len -= copy;
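/*
 * The pattern both hunks add, as a standalone sketch (names illustrative,
 * not from this patch): the dma_async_memcpy_* submit helpers return a
 * negative cookie when the channel has no free descriptor, so the loop
 * flushes the already-submitted work and retries instead of bailing out.
 */
static dma_cookie_t memcpy_when_ring_full(struct dma_chan *chan, void *dst,
					  void *src, size_t len)
{
	dma_cookie_t cookie;

	do {
		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
		if (unlikely(cookie < 0))
			/* ring full: kick the engine, then try again */
			dma_async_issue_pending(chan);
	} while (unlikely(cookie < 0));

	return cookie;
}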
@@ -248,6 +253,11 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
page,
offset,
copy);
+/* poll for a descriptor slot */
+if (unlikely(dma_cookie < 0)) {
+dma_async_issue_pending(chan);
+continue;
+}
len -= copy;
iov[iovec_idx].iov_len -= copy;
......
@@ -29,8 +29,8 @@
#include <asm/idle.h>
#include "../dma/ioatdma_hw.h"
#include "../dma/ioatdma_registers.h"
#include "../dma/ioat/hw.h"
#include "../dma/ioat/registers.h"
#define I7300_IDLE_DRIVER_VERSION "1.55"
#define I7300_PRINT "i7300_idle:"
@@ -126,9 +126,9 @@ static void i7300_idle_ioat_stop(void)
udelay(10);
sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
-IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+IOAT_CHANSTS_STATUS;
-if (sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE)
+if (sts != IOAT_CHANSTS_ACTIVE)
break;
}
@@ -160,9 +160,9 @@ static int __init i7300_idle_ioat_selftest(u8 *ctl,
udelay(1000);
chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
-IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+IOAT_CHANSTS_STATUS;
-if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE) {
+if (chan_sts != IOAT_CHANSTS_DONE) {
/* Not complete, reset the channel */
writeb(IOAT_CHANCMD_RESET,
ioat_chanbase + IOAT1_CHANCMD_OFFSET);
@@ -288,9 +288,9 @@ static void __exit i7300_idle_ioat_exit(void)
ioat_chanbase + IOAT1_CHANCMD_OFFSET);
chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
-IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+IOAT_CHANSTS_STATUS;
-if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
+if (chan_sts != IOAT_CHANSTS_ACTIVE) {
writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
break;
}
@@ -298,14 +298,14 @@
}
chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
-IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+IOAT_CHANSTS_STATUS;
/*
* We tried to reset multiple times. If IO A/T channel is still active
* flag an error and return without cleanup. Memory leak is better
* than random corruption in that extreme error situation.
*/
-if (chan_sts == IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
+if (chan_sts == IOAT_CHANSTS_ACTIVE) {
printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels."
" Not freeing resources\n");
return;
......