Commit 4b3df566 authored by NeilBrown

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx into for-linus

parents 1ef04fef 1f6672d4
@@ -54,20 +54,23 @@ features surfaced as a result:
 3.1 General format of the API:
 struct dma_async_tx_descriptor *
-async_<operation>(<op specific parameters>,
-		  enum async_tx_flags flags,
-		  struct dma_async_tx_descriptor *dependency,
-		  dma_async_tx_callback callback_routine,
-		  void *callback_parameter);
+async_<operation>(<op specific parameters>, struct async_submit_ctl *submit)
 3.2 Supported operations:
 memcpy  - memory copy between a source and a destination buffer
 memset  - fill a destination buffer with a byte value
 xor     - xor a series of source buffers and write the result to a
           destination buffer
-xor_zero_sum - xor a series of source buffers and set a flag if the
-               result is zero.  The implementation attempts to prevent
-               writes to memory
+xor_val - xor a series of source buffers and set a flag if the
+          result is zero.  The implementation attempts to prevent
+          writes to memory
+pq      - generate the p+q (raid6 syndrome) from a series of source buffers
+pq_val  - validate that a p and/or q buffer is in sync with a given series
+          of sources
+datap   - (raid6_datap_recov) recover a raid6 data block and the p block
+          from the given sources
+2data   - (raid6_2data_recov) recover 2 raid6 data blocks from the given
+          sources
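[Editor's note] The raid6 entry points added above use the same submit-based calling convention as the rest of the API. Below is a minimal sketch of generating a syndrome with async_gen_syndrome(), modeled on the raid6test module added later in this merge; the page-array layout (data blocks followed by P and Q), the scribble region, and the helper name are illustrative assumptions, not part of the documentation being patched.

#include <linux/async_tx.h>

/* illustrative: ptrs[0..disks-3] are data, ptrs[disks-2] is P,
 * ptrs[disks-1] is Q */
static struct dma_async_tx_descriptor *
gen_syndrome_sketch(struct page **ptrs, int disks, addr_conv_t *scribble,
		    dma_async_tx_callback cb, void *cb_param)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, cb, cb_param, scribble);
	return async_gen_syndrome(ptrs, 0, disks, PAGE_SIZE, &submit);
}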
 3.3 Descriptor management:
 The return value is non-NULL and points to a 'descriptor' when the operation
@@ -80,8 +83,8 @@ acknowledged by the application before the offload engine driver is allowed to
 recycle (or free) the descriptor.  A descriptor can be acked by one of the
 following methods:
 1/ setting the ASYNC_TX_ACK flag if no child operations are to be submitted
-2/ setting the ASYNC_TX_DEP_ACK flag to acknowledge the parent
-   descriptor of a new operation.
+2/ submitting an unacknowledged descriptor as a dependency to another
+   async_tx call will implicitly set the acknowledged state.
 3/ calling async_tx_ack() on the descriptor.
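[Editor's note] A minimal sketch of the three acknowledgement paths with the new submit-based API follows. The buffers, lengths, and the "copy then clear the source" flow are made up for illustration; only the functions and flags themselves come from this series.

#include <linux/async_tx.h>

static void ack_sketch(struct page *dest, struct page *src, size_t len)
{
	struct dma_async_tx_descriptor *tx, *tx2;
	struct async_submit_ctl submit;

	/* leave 'tx' unacked: a dependent operation will be chained to it */
	init_async_submit(&submit, 0, NULL, NULL, NULL, NULL);
	tx = async_memcpy(dest, src, 0, 0, len, &submit);

	/* 2/ passing 'tx' as the dependency implicitly acks it, and
	 * 1/ ASYNC_TX_ACK acks 'tx2' at submit time since it has no children
	 */
	init_async_submit(&submit, ASYNC_TX_ACK, tx, NULL, NULL, NULL);
	tx2 = async_memset(src, 0, 0, len, &submit);

	/* 3/ alternatively, an unacked descriptor can be acked explicitly
	 * with async_tx_ack(tx2) */
	async_tx_issue_pending_all();
}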
 3.4 When does the operation execute?

@@ -119,30 +122,42 @@ of an operation.
 Perform a xor->copy->xor operation where each operation depends on the
 result from the previous operation:

-void complete_xor_copy_xor(void *param)
+void callback(void *param)
 {
-	printk("complete\n");
+	struct completion *cmp = param;
+
+	complete(cmp);
 }

-int run_xor_copy_xor(struct page **xor_srcs,
-		     int xor_src_cnt,
-		     struct page *xor_dest,
-		     size_t xor_len,
-		     struct page *copy_src,
-		     struct page *copy_dest,
-		     size_t copy_len)
+void run_xor_copy_xor(struct page **xor_srcs,
+		      int xor_src_cnt,
+		      struct page *xor_dest,
+		      size_t xor_len,
+		      struct page *copy_src,
+		      struct page *copy_dest,
+		      size_t copy_len)
 {
 	struct dma_async_tx_descriptor *tx;
+	struct async_submit_ctl submit;
+	addr_conv_t addr_conv[xor_src_cnt];
+	struct completion cmp;

-	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
-		       ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL);
-	tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len,
-			  ASYNC_TX_DEP_ACK, tx, NULL, NULL);
-	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
-		       ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK | ASYNC_TX_ACK,
-		       tx, complete_xor_copy_xor, NULL);
+	init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL,
+			  addr_conv);
+	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit);
+
+	submit.depend_tx = tx;
+	tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, &submit);
+
+	init_completion(&cmp);
+	init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx,
+			  callback, &cmp, addr_conv);
+	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit);

 	async_tx_issue_pending_all();
+
+	wait_for_completion(&cmp);
 }
 See include/linux/async_tx.h for more information on the flags.  See the
...
...@@ -187,11 +187,74 @@ union iop3xx_desc { ...@@ -187,11 +187,74 @@ union iop3xx_desc {
void *ptr; void *ptr;
}; };
/* No support for p+q operations */
static inline int
iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
BUG();
return 0;
}
static inline void
iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
unsigned long flags)
{
BUG();
}
static inline void
iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
{
BUG();
}
static inline void
iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
dma_addr_t addr, unsigned char coef)
{
BUG();
}
static inline int
iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
BUG();
return 0;
}
static inline void
iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
unsigned long flags)
{
BUG();
}
static inline void
iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
BUG();
}
#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
static inline void
iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
dma_addr_t *src)
{
BUG();
}
static inline int iop_adma_get_max_xor(void) static inline int iop_adma_get_max_xor(void)
{ {
return 32; return 32;
} }
static inline int iop_adma_get_max_pq(void)
{
BUG();
return 0;
}
static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan) static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
{ {
int id = chan->device->id; int id = chan->device->id;
...@@ -332,6 +395,11 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt, ...@@ -332,6 +395,11 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
return slot_cnt; return slot_cnt;
} }
static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
{
return 0;
}
static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan) struct iop_adma_chan *chan)
{ {
...@@ -349,6 +417,14 @@ static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, ...@@ -349,6 +417,14 @@ static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
return 0; return 0;
} }
static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
BUG();
return 0;
}
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan) struct iop_adma_chan *chan)
{ {
@@ -756,13 +832,14 @@ static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
 	hw_desc->src[0] = val;
 }

-static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
+static inline enum sum_check_flags
+iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
 {
 	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
 	struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;

 	iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
-	return desc_ctrl.zero_result_err;
+	return desc_ctrl.zero_result_err << SUM_CHECK_P;
 }

 static inline void iop_chan_append(struct iop_adma_chan *chan)
...
...@@ -86,6 +86,7 @@ struct iop_adma_chan { ...@@ -86,6 +86,7 @@ struct iop_adma_chan {
* @idx: pool index * @idx: pool index
* @unmap_src_cnt: number of xor sources * @unmap_src_cnt: number of xor sources
* @unmap_len: transaction bytecount * @unmap_len: transaction bytecount
* @tx_list: list of descriptors that are associated with one operation
* @async_tx: support for the async_tx api * @async_tx: support for the async_tx api
* @group_list: list of slots that make up a multi-descriptor transaction * @group_list: list of slots that make up a multi-descriptor transaction
* for example transfer lengths larger than the supported hw max * for example transfer lengths larger than the supported hw max
...@@ -102,10 +103,12 @@ struct iop_adma_desc_slot { ...@@ -102,10 +103,12 @@ struct iop_adma_desc_slot {
u16 idx; u16 idx;
u16 unmap_src_cnt; u16 unmap_src_cnt;
size_t unmap_len; size_t unmap_len;
struct list_head tx_list;
struct dma_async_tx_descriptor async_tx; struct dma_async_tx_descriptor async_tx;
union { union {
u32 *xor_check_result; u32 *xor_check_result;
u32 *crc32_result; u32 *crc32_result;
u32 *pq_check_result;
}; };
}; };
......
...@@ -150,6 +150,8 @@ static inline int iop_adma_get_max_xor(void) ...@@ -150,6 +150,8 @@ static inline int iop_adma_get_max_xor(void)
return 16; return 16;
} }
#define iop_adma_get_max_pq iop_adma_get_max_xor
static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan) static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
{ {
return __raw_readl(ADMA_ADAR(chan)); return __raw_readl(ADMA_ADAR(chan));
...@@ -211,7 +213,10 @@ iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op) ...@@ -211,7 +213,10 @@ iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op)
#define IOP_ADMA_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT #define IOP_ADMA_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT #define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
#define IOP_ADMA_XOR_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT #define IOP_ADMA_XOR_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
#define IOP_ADMA_PQ_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
#define iop_chan_zero_sum_slot_count(l, s, o) iop_chan_xor_slot_count(l, s, o) #define iop_chan_zero_sum_slot_count(l, s, o) iop_chan_xor_slot_count(l, s, o)
#define iop_chan_pq_slot_count iop_chan_xor_slot_count
#define iop_chan_pq_zero_sum_slot_count iop_chan_xor_slot_count
static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan) struct iop_adma_chan *chan)
...@@ -220,6 +225,13 @@ static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, ...@@ -220,6 +225,13 @@ static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
return hw_desc->dest_addr; return hw_desc->dest_addr;
} }
static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
return hw_desc->q_dest_addr;
}
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan) struct iop_adma_chan *chan)
{ {
...@@ -319,6 +331,58 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, ...@@ -319,6 +331,58 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
return 1; return 1;
} }
static inline void
iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
unsigned long flags)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
union {
u32 value;
struct iop13xx_adma_desc_ctrl field;
} u_desc_ctrl;
u_desc_ctrl.value = 0;
u_desc_ctrl.field.src_select = src_cnt - 1;
u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
u_desc_ctrl.field.pq_xfer_en = 1;
u_desc_ctrl.field.p_xfer_dis = !!(flags & DMA_PREP_PQ_DISABLE_P);
u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
}
static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
union {
u32 value;
struct iop13xx_adma_desc_ctrl field;
} u_desc_ctrl;
u_desc_ctrl.value = hw_desc->desc_ctrl;
return u_desc_ctrl.field.pq_xfer_en;
}
static inline void
iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
unsigned long flags)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
union {
u32 value;
struct iop13xx_adma_desc_ctrl field;
} u_desc_ctrl;
u_desc_ctrl.value = 0;
u_desc_ctrl.field.src_select = src_cnt - 1;
u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
u_desc_ctrl.field.zero_result = 1;
u_desc_ctrl.field.status_write_back_en = 1;
u_desc_ctrl.field.pq_xfer_en = 1;
u_desc_ctrl.field.p_xfer_dis = !!(flags & DMA_PREP_PQ_DISABLE_P);
u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
}
static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc, static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan, struct iop_adma_chan *chan,
u32 byte_count) u32 byte_count)
...@@ -351,6 +415,7 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) ...@@ -351,6 +415,7 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
} }
} }
#define iop_desc_set_pq_zero_sum_byte_count iop_desc_set_zero_sum_byte_count
static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc, static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan, struct iop_adma_chan *chan,
...@@ -361,6 +426,16 @@ static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc, ...@@ -361,6 +426,16 @@ static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
hw_desc->upper_dest_addr = 0; hw_desc->upper_dest_addr = 0;
} }
static inline void
iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
hw_desc->dest_addr = addr[0];
hw_desc->q_dest_addr = addr[1];
hw_desc->upper_dest_addr = 0;
}
static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc, static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
dma_addr_t addr) dma_addr_t addr)
{ {
...@@ -388,6 +463,29 @@ static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc, ...@@ -388,6 +463,29 @@ static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
} while (slot_cnt); } while (slot_cnt);
} }
static inline void
iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
dma_addr_t addr, unsigned char coef)
{
int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter;
struct iop13xx_adma_src *src;
int i = 0;
do {
iter = iop_hw_desc_slot_idx(hw_desc, i);
src = &iter->src[src_idx];
src->src_addr = addr;
src->pq_upper_src_addr = 0;
src->pq_dmlt = coef;
slot_cnt -= slots_per_op;
if (slot_cnt) {
i += slots_per_op;
addr += IOP_ADMA_PQ_MAX_BYTE_COUNT;
}
} while (slot_cnt);
}
static inline void static inline void
iop_desc_init_interrupt(struct iop_adma_desc_slot *desc, iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan) struct iop_adma_chan *chan)
...@@ -399,6 +497,15 @@ iop_desc_init_interrupt(struct iop_adma_desc_slot *desc, ...@@ -399,6 +497,15 @@ iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
} }
#define iop_desc_set_zero_sum_src_addr iop_desc_set_xor_src_addr #define iop_desc_set_zero_sum_src_addr iop_desc_set_xor_src_addr
#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
static inline void
iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
dma_addr_t *src)
{
iop_desc_set_xor_src_addr(desc, pq_idx, src[pq_idx]);
iop_desc_set_xor_src_addr(desc, pq_idx+1, src[pq_idx+1]);
}
static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc, static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
u32 next_desc_addr) u32 next_desc_addr)
@@ -428,18 +535,20 @@ static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
 	hw_desc->block_fill_data = val;
 }

-static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
+static inline enum sum_check_flags
+iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
 {
 	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
 	struct iop13xx_adma_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
 	struct iop13xx_adma_byte_count byte_count = hw_desc->byte_count_field;
+	enum sum_check_flags flags;

 	BUG_ON(!(byte_count.tx_complete && desc_ctrl.zero_result));

-	if (desc_ctrl.pq_xfer_en)
-		return byte_count.zero_result_err_q;
-	else
-		return byte_count.zero_result_err;
+	flags = byte_count.zero_result_err_q << SUM_CHECK_Q;
+	flags |= byte_count.zero_result_err << SUM_CHECK_P;
+
+	return flags;
 }

 static inline void iop_chan_append(struct iop_adma_chan *chan)
...
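[Editor's note] For readers new to the return-type change above: enum sum_check_flags packs the P and Q validation results into one value, with bit positions SUM_CHECK_P and SUM_CHECK_Q. A minimal, hypothetical sketch of how a caller might decode the result is below; the SUM_CHECK_P_RESULT/SUM_CHECK_Q_RESULT masks are defined in the dmaengine changes in this series, while the helper itself is made up for illustration.

#include <linux/dmaengine.h>
#include <linux/kernel.h>

/* hypothetical helper: report which parity blocks failed validation */
static void report_sum_check(enum sum_check_flags result)
{
	if (result & SUM_CHECK_P_RESULT)
		pr_err("P (xor) parity mismatch\n");
	if (result & SUM_CHECK_Q_RESULT)
		pr_err("Q (raid6 syndrome) mismatch\n");
}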
...@@ -477,10 +477,8 @@ void __init iop13xx_platform_init(void) ...@@ -477,10 +477,8 @@ void __init iop13xx_platform_init(void)
plat_data = &iop13xx_adma_0_data; plat_data = &iop13xx_adma_0_data;
dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask);
dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
break; break;
case IOP13XX_INIT_ADMA_1: case IOP13XX_INIT_ADMA_1:
...@@ -489,10 +487,8 @@ void __init iop13xx_platform_init(void) ...@@ -489,10 +487,8 @@ void __init iop13xx_platform_init(void)
plat_data = &iop13xx_adma_1_data; plat_data = &iop13xx_adma_1_data;
dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask);
dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
break; break;
case IOP13XX_INIT_ADMA_2: case IOP13XX_INIT_ADMA_2:
...@@ -501,14 +497,11 @@ void __init iop13xx_platform_init(void) ...@@ -501,14 +497,11 @@ void __init iop13xx_platform_init(void)
plat_data = &iop13xx_adma_2_data; plat_data = &iop13xx_adma_2_data;
dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask);
dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
dma_cap_set(DMA_PQ_XOR, plat_data->cap_mask); dma_cap_set(DMA_PQ, plat_data->cap_mask);
dma_cap_set(DMA_PQ_UPDATE, plat_data->cap_mask); dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask);
dma_cap_set(DMA_PQ_ZERO_SUM, plat_data->cap_mask);
break; break;
} }
} }
......
...@@ -179,7 +179,6 @@ static int __init iop3xx_adma_cap_init(void) ...@@ -179,7 +179,6 @@ static int __init iop3xx_adma_cap_init(void)
dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask);
#else #else
dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask); dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask);
dma_cap_set(DMA_MEMCPY_CRC32C, iop3xx_dma_0_data.cap_mask);
dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask);
#endif #endif
...@@ -188,7 +187,6 @@ static int __init iop3xx_adma_cap_init(void) ...@@ -188,7 +187,6 @@ static int __init iop3xx_adma_cap_init(void)
dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask);
#else #else
dma_cap_set(DMA_MEMCPY, iop3xx_dma_1_data.cap_mask); dma_cap_set(DMA_MEMCPY, iop3xx_dma_1_data.cap_mask);
dma_cap_set(DMA_MEMCPY_CRC32C, iop3xx_dma_1_data.cap_mask);
dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask);
#endif #endif
...@@ -198,7 +196,7 @@ static int __init iop3xx_adma_cap_init(void) ...@@ -198,7 +196,7 @@ static int __init iop3xx_adma_cap_init(void)
dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
#else #else
dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask); dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
dma_cap_set(DMA_ZERO_SUM, iop3xx_aau_data.cap_mask); dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask);
dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask); dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
#endif #endif
......
/*
* Freescale MPC83XX / MPC85XX DMA Controller
*
* Copyright (c) 2009 Ira W. Snyder <iws@ovro.caltech.edu>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
#define __ARCH_POWERPC_ASM_FSLDMA_H__
#include <linux/dmaengine.h>
/*
* Definitions for the Freescale DMA controller's DMA_SLAVE implemention
*
* The Freescale DMA_SLAVE implementation was designed to handle many-to-many
* transfers. An example usage would be an accelerated copy between two
* scatterlists. Another example use would be an accelerated copy from
* multiple non-contiguous device buffers into a single scatterlist.
*
* A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This
* structure contains a list of hardware addresses that should be copied
* to/from the scatterlist passed into device_prep_slave_sg(). The structure
* also has some fields to enable hardware-specific features.
*/
/**
* struct fsl_dma_hw_addr
* @entry: linked list entry
* @address: the hardware address
* @length: length to transfer
*
* Holds a single physical hardware address / length pair for use
* with the DMAEngine DMA_SLAVE API.
*/
struct fsl_dma_hw_addr {
struct list_head entry;
dma_addr_t address;
size_t length;
};
/**
* struct fsl_dma_slave
* @addresses: a linked list of struct fsl_dma_hw_addr structures
* @request_count: value for DMA request count
* @src_loop_size: setup and enable constant source-address DMA transfers
* @dst_loop_size: setup and enable constant destination address DMA transfers
* @external_start: enable externally started DMA transfers
* @external_pause: enable externally paused DMA transfers
*
* Holds a list of address / length pairs for use with the DMAEngine
* DMA_SLAVE API implementation for the Freescale DMA controller.
*/
struct fsl_dma_slave {
/* List of hardware address/length pairs */
struct list_head addresses;
/* Support for extra controller features */
unsigned int request_count;
unsigned int src_loop_size;
unsigned int dst_loop_size;
bool external_start;
bool external_pause;
};
/**
* fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave
* @slave: the &struct fsl_dma_slave to add to
* @address: the hardware address to add
* @length: the length of bytes to transfer from @address
*
* Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on
* success, -ERRNO otherwise.
*/
static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave,
dma_addr_t address, size_t length)
{
struct fsl_dma_hw_addr *addr;
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (!addr)
return -ENOMEM;
INIT_LIST_HEAD(&addr->entry);
addr->address = address;
addr->length = length;
list_add_tail(&addr->entry, &slave->addresses);
return 0;
}
/**
* fsl_dma_slave_free - free a struct fsl_dma_slave
* @slave: the struct fsl_dma_slave to free
*
* Free a struct fsl_dma_slave and all associated address/length pairs
*/
static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave)
{
struct fsl_dma_hw_addr *addr, *tmp;
if (slave) {
list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) {
list_del(&addr->entry);
kfree(addr);
}
kfree(slave);
}
}
/**
* fsl_dma_slave_alloc - allocate a struct fsl_dma_slave
* @gfp: the flags to pass to kmalloc when allocating this structure
*
* Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new
* struct fsl_dma_slave on success, or NULL on failure.
*/
static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp)
{
struct fsl_dma_slave *slave;
slave = kzalloc(sizeof(*slave), gfp);
if (!slave)
return NULL;
INIT_LIST_HEAD(&slave->addresses);
return slave;
}
#endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */
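[Editor's note] To make the intended flow concrete, here is a minimal usage sketch built only from the helpers defined in this header. The helper name is hypothetical, and how the finished fsl_dma_slave reaches the driver (commonly via chan->private before calling device_prep_slave_sg()) is an assumption about the Freescale DMA_SLAVE implementation rather than something shown here.

#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <asm/fsldma.h>

/* hypothetical: describe two device buffers for one DMA_SLAVE transaction */
static struct fsl_dma_slave *build_slave_desc(dma_addr_t buf1, dma_addr_t buf2,
					      size_t len)
{
	struct fsl_dma_slave *slave;

	slave = fsl_dma_slave_alloc(GFP_KERNEL);
	if (!slave)
		return NULL;

	if (fsl_dma_slave_append(slave, buf1, len) ||
	    fsl_dma_slave_append(slave, buf2, len)) {
		fsl_dma_slave_free(slave);
		return NULL;
	}

	return slave;
}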
menu "DMA support" menu "DMA support"
config SH_DMA_API
bool
config SH_DMA config SH_DMA
bool "SuperH on-chip DMA controller (DMAC) support" bool "SuperH on-chip DMA controller (DMAC) support"
depends on CPU_SH3 || CPU_SH4 depends on CPU_SH3 || CPU_SH4
select SH_DMA_API
default n default n
config SH_DMA_IRQ_MULTI config SH_DMA_IRQ_MULTI
...@@ -19,6 +16,15 @@ config SH_DMA_IRQ_MULTI ...@@ -19,6 +16,15 @@ config SH_DMA_IRQ_MULTI
CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785 || \ CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785 || \
CPU_SUBTYPE_SH7760 CPU_SUBTYPE_SH7760
config SH_DMA_API
depends on SH_DMA
bool "SuperH DMA API support"
default n
help
This option enables the legacy SuperH DMA API.
If you want to use the dmaengine framework instead, do not enable
this option; enable DMA_ENGINE and SH_DMAE.
config NR_ONCHIP_DMA_CHANNELS config NR_ONCHIP_DMA_CHANNELS
int int
depends on SH_DMA depends on SH_DMA
......
...@@ -2,8 +2,7 @@ ...@@ -2,8 +2,7 @@
# Makefile for the SuperH DMA specific kernel interface routines under Linux. # Makefile for the SuperH DMA specific kernel interface routines under Linux.
# #
obj-$(CONFIG_SH_DMA_API) += dma-api.o dma-sysfs.o obj-$(CONFIG_SH_DMA_API) += dma-sh.o dma-api.o dma-sysfs.o
obj-$(CONFIG_SH_DMA) += dma-sh.o
obj-$(CONFIG_PVR2_DMA) += dma-pvr2.o obj-$(CONFIG_PVR2_DMA) += dma-pvr2.o
obj-$(CONFIG_G2_DMA) += dma-g2.o obj-$(CONFIG_G2_DMA) += dma-g2.o
obj-$(CONFIG_SH_DMABRG) += dmabrg.o obj-$(CONFIG_SH_DMABRG) += dmabrg.o
...@@ -116,4 +116,17 @@ static u32 dma_base_addr[] __maybe_unused = { ...@@ -116,4 +116,17 @@ static u32 dma_base_addr[] __maybe_unused = {
#define CHCR 0x0C #define CHCR 0x0C
#define DMAOR 0x40 #define DMAOR 0x40
/*
* for dma engine
*
* SuperH DMA mode
*/
#define SHDMA_MIX_IRQ (1 << 1)
#define SHDMA_DMAOR1 (1 << 2)
#define SHDMA_DMAE1 (1 << 3)
struct sh_dmae_pdata {
unsigned int mode;
};
#endif /* __DMA_SH_H */ #endif /* __DMA_SH_H */
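[Editor's note] A board file would typically hand this platform data to the new SH_DMAE dmaengine driver. A minimal sketch follows; the "sh-dma-engine" device name is an assumption about the driver's platform_driver name, and resources/IRQ numbers are omitted for brevity.

#include <linux/platform_device.h>
#include <asm/dma-sh.h>

static struct sh_dmae_pdata sh_dmae_platform_data = {
	.mode = SHDMA_MIX_IRQ,	/* share one IRQ among the channels */
};

static struct platform_device sh_dmae_device = {
	.name	= "sh-dma-engine",	/* assumed driver name */
	.id	= -1,
	.dev	= {
		.platform_data = &sh_dmae_platform_data,
	},
};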
...@@ -14,3 +14,12 @@ config ASYNC_MEMSET ...@@ -14,3 +14,12 @@ config ASYNC_MEMSET
tristate tristate
select ASYNC_CORE select ASYNC_CORE
config ASYNC_PQ
tristate
select ASYNC_CORE
config ASYNC_RAID6_RECOV
tristate
select ASYNC_CORE
select ASYNC_PQ
...@@ -2,3 +2,6 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx.o ...@@ -2,3 +2,6 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx.o
obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o
obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o
obj-$(CONFIG_ASYNC_XOR) += async_xor.o obj-$(CONFIG_ASYNC_XOR) += async_xor.o
obj-$(CONFIG_ASYNC_PQ) += async_pq.o
obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o
obj-$(CONFIG_ASYNC_RAID6_TEST) += raid6test.o
...@@ -33,28 +33,31 @@ ...@@ -33,28 +33,31 @@
* async_memcpy - attempt to copy memory with a dma engine. * async_memcpy - attempt to copy memory with a dma engine.
* @dest: destination page * @dest: destination page
* @src: src page * @src: src page
* @offset: offset in pages to start transaction * @dest_offset: offset into 'dest' to start transaction
* @src_offset: offset into 'src' to start transaction
* @len: length in bytes * @len: length in bytes
* @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK, * @submit: submission / completion modifiers
* @depend_tx: memcpy depends on the result of this transaction *
* @cb_fn: function to call when the memcpy completes * honored flags: ASYNC_TX_ACK
* @cb_param: parameter to pass to the callback routine
*/ */
struct dma_async_tx_descriptor * struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
unsigned int src_offset, size_t len, enum async_tx_flags flags, unsigned int src_offset, size_t len,
struct dma_async_tx_descriptor *depend_tx, struct async_submit_ctl *submit)
dma_async_tx_callback cb_fn, void *cb_param)
{ {
struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY, struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
&dest, 1, &src, 1, len); &dest, 1, &src, 1, len);
struct dma_device *device = chan ? chan->device : NULL; struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL; struct dma_async_tx_descriptor *tx = NULL;
if (device) { if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
dma_addr_t dma_dest, dma_src; dma_addr_t dma_dest, dma_src;
unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; unsigned long dma_prep_flags = 0;
if (submit->cb_fn)
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
dma_dest = dma_map_page(device->dev, dest, dest_offset, len, dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
...@@ -67,13 +70,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, ...@@ -67,13 +70,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
if (tx) { if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len); pr_debug("%s: (async) len: %zu\n", __func__, len);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); async_tx_submit(chan, tx, submit);
} else { } else {
void *dest_buf, *src_buf; void *dest_buf, *src_buf;
pr_debug("%s: (sync) len: %zu\n", __func__, len); pr_debug("%s: (sync) len: %zu\n", __func__, len);
/* wait for any prerequisite operations */ /* wait for any prerequisite operations */
async_tx_quiesce(&depend_tx); async_tx_quiesce(&submit->depend_tx);
dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
src_buf = kmap_atomic(src, KM_USER1) + src_offset; src_buf = kmap_atomic(src, KM_USER1) + src_offset;
...@@ -83,26 +86,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, ...@@ -83,26 +86,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
kunmap_atomic(dest_buf, KM_USER0); kunmap_atomic(dest_buf, KM_USER0);
kunmap_atomic(src_buf, KM_USER1); kunmap_atomic(src_buf, KM_USER1);
async_tx_sync_epilog(cb_fn, cb_param); async_tx_sync_epilog(submit);
} }
return tx; return tx;
} }
EXPORT_SYMBOL_GPL(async_memcpy); EXPORT_SYMBOL_GPL(async_memcpy);
static int __init async_memcpy_init(void)
{
return 0;
}
static void __exit async_memcpy_exit(void)
{
do { } while (0);
}
module_init(async_memcpy_init);
module_exit(async_memcpy_exit);
MODULE_AUTHOR("Intel Corporation"); MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memcpy api"); MODULE_DESCRIPTION("asynchronous memcpy api");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
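[Editor's note] For callers converting from the old flags/depend_tx/callback argument list, a minimal sketch of the new submit-based convention for async_memcpy() looks like this (the page pointers, length, and helper name are illustrative; async_memset below follows the same pattern):

#include <linux/async_tx.h>

static struct dma_async_tx_descriptor *
copy_page_async(struct page *dest, struct page *src, size_t len,
		dma_async_tx_callback cb, void *cb_param)
{
	struct async_submit_ctl submit;

	/* no dependency and no scribble region needed for a plain copy */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, cb, cb_param, NULL);
	return async_memcpy(dest, src, 0, 0, len, &submit);
}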
...@@ -35,26 +35,26 @@ ...@@ -35,26 +35,26 @@
* @val: fill value * @val: fill value
* @offset: offset in pages to start transaction * @offset: offset in pages to start transaction
* @len: length in bytes * @len: length in bytes
* @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK *
* @depend_tx: memset depends on the result of this transaction * honored flags: ASYNC_TX_ACK
* @cb_fn: function to call when the memcpy completes
* @cb_param: parameter to pass to the callback routine
*/ */
struct dma_async_tx_descriptor * struct dma_async_tx_descriptor *
async_memset(struct page *dest, int val, unsigned int offset, async_memset(struct page *dest, int val, unsigned int offset, size_t len,
size_t len, enum async_tx_flags flags, struct async_submit_ctl *submit)
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{ {
struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET, struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET,
&dest, 1, NULL, 0, len); &dest, 1, NULL, 0, len);
struct dma_device *device = chan ? chan->device : NULL; struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL; struct dma_async_tx_descriptor *tx = NULL;
if (device) { if (device && is_dma_fill_aligned(device, offset, 0, len)) {
dma_addr_t dma_dest; dma_addr_t dma_dest;
unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; unsigned long dma_prep_flags = 0;
if (submit->cb_fn)
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
dma_dest = dma_map_page(device->dev, dest, offset, len, dma_dest = dma_map_page(device->dev, dest, offset, len,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
...@@ -64,38 +64,25 @@ async_memset(struct page *dest, int val, unsigned int offset, ...@@ -64,38 +64,25 @@ async_memset(struct page *dest, int val, unsigned int offset,
if (tx) { if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len); pr_debug("%s: (async) len: %zu\n", __func__, len);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); async_tx_submit(chan, tx, submit);
} else { /* run the memset synchronously */ } else { /* run the memset synchronously */
void *dest_buf; void *dest_buf;
pr_debug("%s: (sync) len: %zu\n", __func__, len); pr_debug("%s: (sync) len: %zu\n", __func__, len);
dest_buf = (void *) (((char *) page_address(dest)) + offset); dest_buf = page_address(dest) + offset;
/* wait for any prerequisite operations */ /* wait for any prerequisite operations */
async_tx_quiesce(&depend_tx); async_tx_quiesce(&submit->depend_tx);
memset(dest_buf, val, len); memset(dest_buf, val, len);
async_tx_sync_epilog(cb_fn, cb_param); async_tx_sync_epilog(submit);
} }
return tx; return tx;
} }
EXPORT_SYMBOL_GPL(async_memset); EXPORT_SYMBOL_GPL(async_memset);
static int __init async_memset_init(void)
{
return 0;
}
static void __exit async_memset_exit(void)
{
do { } while (0);
}
module_init(async_memset_init);
module_exit(async_memset_exit);
MODULE_AUTHOR("Intel Corporation"); MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memset api"); MODULE_DESCRIPTION("asynchronous memset api");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
...@@ -42,16 +42,21 @@ static void __exit async_tx_exit(void) ...@@ -42,16 +42,21 @@ static void __exit async_tx_exit(void)
async_dmaengine_put(); async_dmaengine_put();
} }
module_init(async_tx_init);
module_exit(async_tx_exit);
/** /**
* __async_tx_find_channel - find a channel to carry out the operation or let * __async_tx_find_channel - find a channel to carry out the operation or let
* the transaction execute synchronously * the transaction execute synchronously
* @depend_tx: transaction dependency * @submit: transaction dependency and submission modifiers
* @tx_type: transaction type * @tx_type: transaction type
*/ */
struct dma_chan * struct dma_chan *
__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, __async_tx_find_channel(struct async_submit_ctl *submit,
enum dma_transaction_type tx_type) enum dma_transaction_type tx_type)
{ {
struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
/* see if we can keep the chain on one channel */ /* see if we can keep the chain on one channel */
if (depend_tx && if (depend_tx &&
dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
...@@ -59,17 +64,6 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, ...@@ -59,17 +64,6 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
return async_dma_find_channel(tx_type); return async_dma_find_channel(tx_type);
} }
EXPORT_SYMBOL_GPL(__async_tx_find_channel); EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#else
static int __init async_tx_init(void)
{
printk(KERN_INFO "async_tx: api initialized (sync-only)\n");
return 0;
}
static void __exit async_tx_exit(void)
{
do { } while (0);
}
#endif #endif
...@@ -83,10 +77,14 @@ static void ...@@ -83,10 +77,14 @@ static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
struct dma_async_tx_descriptor *tx) struct dma_async_tx_descriptor *tx)
{ {
struct dma_chan *chan; struct dma_chan *chan = depend_tx->chan;
struct dma_device *device; struct dma_device *device = chan->device;
struct dma_async_tx_descriptor *intr_tx = (void *) ~0; struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
BUG();
#endif
/* first check to see if we can still append to depend_tx */ /* first check to see if we can still append to depend_tx */
spin_lock_bh(&depend_tx->lock); spin_lock_bh(&depend_tx->lock);
if (depend_tx->parent && depend_tx->chan == tx->chan) { if (depend_tx->parent && depend_tx->chan == tx->chan) {
...@@ -96,11 +94,11 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, ...@@ -96,11 +94,11 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
} }
spin_unlock_bh(&depend_tx->lock); spin_unlock_bh(&depend_tx->lock);
if (!intr_tx) /* attached dependency, flush the parent channel */
if (!intr_tx) {
device->device_issue_pending(chan);
return; return;
}
chan = depend_tx->chan;
device = chan->device;
/* see if we can schedule an interrupt /* see if we can schedule an interrupt
* otherwise poll for completion * otherwise poll for completion
...@@ -134,6 +132,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, ...@@ -134,6 +132,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
intr_tx->tx_submit(intr_tx); intr_tx->tx_submit(intr_tx);
async_tx_ack(intr_tx); async_tx_ack(intr_tx);
} }
device->device_issue_pending(chan);
} else { } else {
if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
panic("%s: DMA_ERROR waiting for depend_tx\n", panic("%s: DMA_ERROR waiting for depend_tx\n",
...@@ -144,13 +143,14 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, ...@@ -144,13 +143,14 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
/** /**
* submit_disposition - while holding depend_tx->lock we must avoid submitting * submit_disposition - flags for routing an incoming operation
* new operations to prevent a circular locking dependency with
* drivers that already hold a channel lock when calling
* async_tx_run_dependencies.
* @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
* @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
* @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
*
* while holding depend_tx->lock we must avoid submitting new operations
* to prevent a circular locking dependency with drivers that already
* hold a channel lock when calling async_tx_run_dependencies.
*/ */
enum submit_disposition { enum submit_disposition {
ASYNC_TX_SUBMITTED, ASYNC_TX_SUBMITTED,
...@@ -160,11 +160,12 @@ enum submit_disposition { ...@@ -160,11 +160,12 @@ enum submit_disposition {
void void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, struct async_submit_ctl *submit)
dma_async_tx_callback cb_fn, void *cb_param)
{ {
tx->callback = cb_fn; struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
tx->callback_param = cb_param;
tx->callback = submit->cb_fn;
tx->callback_param = submit->cb_param;
if (depend_tx) { if (depend_tx) {
enum submit_disposition s; enum submit_disposition s;
...@@ -220,30 +221,29 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, ...@@ -220,30 +221,29 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
tx->tx_submit(tx); tx->tx_submit(tx);
} }
if (flags & ASYNC_TX_ACK) if (submit->flags & ASYNC_TX_ACK)
async_tx_ack(tx); async_tx_ack(tx);
if (depend_tx && (flags & ASYNC_TX_DEP_ACK)) if (depend_tx)
async_tx_ack(depend_tx); async_tx_ack(depend_tx);
} }
EXPORT_SYMBOL_GPL(async_tx_submit); EXPORT_SYMBOL_GPL(async_tx_submit);
/** /**
* async_trigger_callback - schedules the callback function to be run after * async_trigger_callback - schedules the callback function to be run
* any dependent operations have been completed. * @submit: submission and completion parameters
* @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK *
* @depend_tx: 'callback' requires the completion of this transaction * honored flags: ASYNC_TX_ACK
* @cb_fn: function to call after depend_tx completes *
* @cb_param: parameter to pass to the callback routine * The callback is run after any dependent operations have completed.
*/ */
struct dma_async_tx_descriptor * struct dma_async_tx_descriptor *
async_trigger_callback(enum async_tx_flags flags, async_trigger_callback(struct async_submit_ctl *submit)
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{ {
struct dma_chan *chan; struct dma_chan *chan;
struct dma_device *device; struct dma_device *device;
struct dma_async_tx_descriptor *tx; struct dma_async_tx_descriptor *tx;
struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
if (depend_tx) { if (depend_tx) {
chan = depend_tx->chan; chan = depend_tx->chan;
...@@ -262,14 +262,14 @@ async_trigger_callback(enum async_tx_flags flags, ...@@ -262,14 +262,14 @@ async_trigger_callback(enum async_tx_flags flags,
if (tx) { if (tx) {
pr_debug("%s: (async)\n", __func__); pr_debug("%s: (async)\n", __func__);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); async_tx_submit(chan, tx, submit);
} else { } else {
pr_debug("%s: (sync)\n", __func__); pr_debug("%s: (sync)\n", __func__);
/* wait for any prerequisite operations */ /* wait for any prerequisite operations */
async_tx_quiesce(&depend_tx); async_tx_quiesce(&submit->depend_tx);
async_tx_sync_epilog(cb_fn, cb_param); async_tx_sync_epilog(submit);
} }
return tx; return tx;
...@@ -295,9 +295,6 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx) ...@@ -295,9 +295,6 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
} }
EXPORT_SYMBOL_GPL(async_tx_quiesce); EXPORT_SYMBOL_GPL(async_tx_quiesce);
module_init(async_tx_init);
module_exit(async_tx_exit);
MODULE_AUTHOR("Intel Corporation"); MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API"); MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
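[Editor's note] As a usage sketch for the reworked async_trigger_callback(): it schedules a callback once a dependency chain quiesces. The completion-based wait below mirrors the raid6test module in this merge; the descriptor 'tx' is assumed to be the tail of a previously submitted chain, and the helper names are hypothetical.

#include <linux/async_tx.h>
#include <linux/completion.h>

static void chain_done(void *param)
{
	struct completion *cmp = param;

	complete(cmp);
}

/* wait for the chain ending in 'tx' to complete */
static void wait_for_chain(struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct completion cmp;

	init_completion(&cmp);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, chain_done, &cmp, NULL);
	async_trigger_callback(&submit);

	async_tx_issue_pending_all();
	wait_for_completion(&cmp);
}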
/*
* asynchronous raid6 recovery self test
* Copyright (c) 2009, Intel Corporation.
*
* based on drivers/md/raid6test/test.c:
* Copyright 2002-2007 H. Peter Anvin
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/async_tx.h>
#include <linux/random.h>
#undef pr
#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)
#define NDISKS 16 /* Including P and Q */
static struct page *dataptrs[NDISKS];
static addr_conv_t addr_conv[NDISKS];
static struct page *data[NDISKS+3];
static struct page *spare;
static struct page *recovi;
static struct page *recovj;
static void callback(void *param)
{
struct completion *cmp = param;
complete(cmp);
}
static void makedata(int disks)
{
int i, j;
for (i = 0; i < disks; i++) {
for (j = 0; j < PAGE_SIZE/sizeof(u32); j += sizeof(u32)) {
u32 *p = page_address(data[i]) + j;
*p = random32();
}
dataptrs[i] = data[i];
}
}
static char disk_type(int d, int disks)
{
if (d == disks - 2)
return 'P';
else if (d == disks - 1)
return 'Q';
else
return 'D';
}
/* Recover two failed blocks. */
static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs)
{
struct async_submit_ctl submit;
struct completion cmp;
struct dma_async_tx_descriptor *tx = NULL;
enum sum_check_flags result = ~0;
if (faila > failb)
swap(faila, failb);
if (failb == disks-1) {
if (faila == disks-2) {
/* P+Q failure. Just rebuild the syndrome. */
init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
} else {
struct page *blocks[disks];
struct page *dest;
int count = 0;
int i;
/* data+Q failure. Reconstruct data from P,
* then rebuild syndrome
*/
for (i = disks; i-- ; ) {
if (i == faila || i == failb)
continue;
blocks[count++] = ptrs[i];
}
dest = ptrs[faila];
init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
NULL, NULL, addr_conv);
tx = async_xor(dest, blocks, 0, count, bytes, &submit);
init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
}
} else {
if (failb == disks-2) {
/* data+P failure. */
init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit);
} else {
/* data+data failure. */
init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit);
}
}
init_completion(&cmp);
init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv);
tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit);
async_tx_issue_pending(tx);
if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0)
pr("%s: timeout! (faila: %d failb: %d disks: %d)\n",
__func__, faila, failb, disks);
if (result != 0)
pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n",
__func__, faila, failb, result);
}
static int test_disks(int i, int j, int disks)
{
int erra, errb;
memset(page_address(recovi), 0xf0, PAGE_SIZE);
memset(page_address(recovj), 0xba, PAGE_SIZE);
dataptrs[i] = recovi;
dataptrs[j] = recovj;
raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs);
erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);
errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);
pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n",
__func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks),
(!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB");
dataptrs[i] = data[i];
dataptrs[j] = data[j];
return erra || errb;
}
static int test(int disks, int *tests)
{
struct dma_async_tx_descriptor *tx;
struct async_submit_ctl submit;
struct completion cmp;
int err = 0;
int i, j;
recovi = data[disks];
recovj = data[disks+1];
spare = data[disks+2];
makedata(disks);
/* Nuke syndromes */
memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);
memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);
/* Generate assumed good syndrome */
init_completion(&cmp);
init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv);
tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit);
async_tx_issue_pending(tx);
if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) {
pr("error: initial gen_syndrome(%d) timed out\n", disks);
return 1;
}
pr("testing the %d-disk case...\n", disks);
for (i = 0; i < disks-1; i++)
for (j = i+1; j < disks; j++) {
(*tests)++;
err += test_disks(i, j, disks);
}
return err;
}
static int raid6_test(void)
{
int err = 0;
int tests = 0;
int i;
for (i = 0; i < NDISKS+3; i++) {
data[i] = alloc_page(GFP_KERNEL);
if (!data[i]) {
while (i--)
put_page(data[i]);
return -ENOMEM;
}
}
/* the 4-disk and 5-disk cases are special for the recovery code */
if (NDISKS > 4)
err += test(4, &tests);
if (NDISKS > 5)
err += test(5, &tests);
err += test(NDISKS, &tests);
pr("\n");
pr("complete (%d tests, %d failure%s)\n",
tests, err, err == 1 ? "" : "s");
for (i = 0; i < NDISKS+3; i++)
put_page(data[i]);
return 0;
}
static void raid6_test_exit(void)
{
}
/* when compiled-in, wait for drivers to load first (assumes dma drivers
 * are also compiled-in)
 */
late_initcall(raid6_test);
module_exit(raid6_test_exit);
MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests");
MODULE_LICENSE("GPL");
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#include <linux/device.h> #include <linux/device.h>
#include <linux/dca.h> #include <linux/dca.h>
#define DCA_VERSION "1.8" #define DCA_VERSION "1.12.1"
MODULE_VERSION(DCA_VERSION); MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
...@@ -36,20 +36,92 @@ MODULE_AUTHOR("Intel Corporation"); ...@@ -36,20 +36,92 @@ MODULE_AUTHOR("Intel Corporation");
static DEFINE_SPINLOCK(dca_lock); static DEFINE_SPINLOCK(dca_lock);
static LIST_HEAD(dca_providers); static LIST_HEAD(dca_domains);
static struct dca_provider *dca_find_provider_by_dev(struct device *dev) static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{ {
struct dca_provider *dca, *ret = NULL; struct pci_dev *pdev = to_pci_dev(dev);
struct pci_bus *bus = pdev->bus;
list_for_each_entry(dca, &dca_providers, node) { while (bus->parent)
if ((!dev) || (dca->ops->dev_managed(dca, dev))) { bus = bus->parent;
ret = dca;
break; return bus;
} }
static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
struct dca_domain *domain;
domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
if (!domain)
return NULL;
INIT_LIST_HEAD(&domain->dca_providers);
domain->pci_rc = rc;
return domain;
}
static void dca_free_domain(struct dca_domain *domain)
{
list_del(&domain->node);
kfree(domain);
}
static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
struct dca_domain *domain;
list_for_each_entry(domain, &dca_domains, node)
if (domain->pci_rc == rc)
return domain;
return NULL;
}
static struct dca_domain *dca_get_domain(struct device *dev)
{
struct pci_bus *rc;
struct dca_domain *domain;
rc = dca_pci_rc_from_dev(dev);
domain = dca_find_domain(rc);
if (!domain) {
domain = dca_allocate_domain(rc);
if (domain)
list_add(&domain->node, &dca_domains);
}
return domain;
}
static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
struct dca_provider *dca;
struct pci_bus *rc;
struct dca_domain *domain;
if (dev) {
rc = dca_pci_rc_from_dev(dev);
domain = dca_find_domain(rc);
if (!domain)
return NULL;
} else {
if (!list_empty(&dca_domains))
domain = list_first_entry(&dca_domains,
struct dca_domain,
node);
else
return NULL;
} }
return ret; list_for_each_entry(dca, &domain->dca_providers, node)
if ((!dev) || (dca->ops->dev_managed(dca, dev)))
return dca;
return NULL;
} }
/** /**
...@@ -61,6 +133,8 @@ int dca_add_requester(struct device *dev) ...@@ -61,6 +133,8 @@ int dca_add_requester(struct device *dev)
struct dca_provider *dca; struct dca_provider *dca;
int err, slot = -ENODEV; int err, slot = -ENODEV;
unsigned long flags; unsigned long flags;
struct pci_bus *pci_rc;
struct dca_domain *domain;
if (!dev) if (!dev)
return -EFAULT; return -EFAULT;
...@@ -74,7 +148,14 @@ int dca_add_requester(struct device *dev) ...@@ -74,7 +148,14 @@ int dca_add_requester(struct device *dev)
return -EEXIST; return -EEXIST;
} }
list_for_each_entry(dca, &dca_providers, node) { pci_rc = dca_pci_rc_from_dev(dev);
domain = dca_find_domain(pci_rc);
if (!domain) {
spin_unlock_irqrestore(&dca_lock, flags);
return -ENODEV;
}
list_for_each_entry(dca, &domain->dca_providers, node) {
slot = dca->ops->add_requester(dca, dev); slot = dca->ops->add_requester(dca, dev);
if (slot >= 0) if (slot >= 0)
break; break;
...@@ -222,13 +303,19 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev) ...@@ -222,13 +303,19 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
{ {
int err; int err;
unsigned long flags; unsigned long flags;
struct dca_domain *domain;
err = dca_sysfs_add_provider(dca, dev); err = dca_sysfs_add_provider(dca, dev);
if (err) if (err)
return err; return err;
spin_lock_irqsave(&dca_lock, flags); spin_lock_irqsave(&dca_lock, flags);
list_add(&dca->node, &dca_providers); domain = dca_get_domain(dev);
if (!domain) {
spin_unlock_irqrestore(&dca_lock, flags);
return -ENODEV;
}
list_add(&dca->node, &domain->dca_providers);
spin_unlock_irqrestore(&dca_lock, flags); spin_unlock_irqrestore(&dca_lock, flags);
blocking_notifier_call_chain(&dca_provider_chain, blocking_notifier_call_chain(&dca_provider_chain,
...@@ -241,15 +328,24 @@ EXPORT_SYMBOL_GPL(register_dca_provider); ...@@ -241,15 +328,24 @@ EXPORT_SYMBOL_GPL(register_dca_provider);
* unregister_dca_provider - remove a dca provider * unregister_dca_provider - remove a dca provider
* @dca - struct created by alloc_dca_provider() * @dca - struct created by alloc_dca_provider()
*/ */
void unregister_dca_provider(struct dca_provider *dca) void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{ {
unsigned long flags; unsigned long flags;
struct pci_bus *pci_rc;
struct dca_domain *domain;
blocking_notifier_call_chain(&dca_provider_chain, blocking_notifier_call_chain(&dca_provider_chain,
DCA_PROVIDER_REMOVE, NULL); DCA_PROVIDER_REMOVE, NULL);
spin_lock_irqsave(&dca_lock, flags); spin_lock_irqsave(&dca_lock, flags);
list_del(&dca->node); list_del(&dca->node);
pci_rc = dca_pci_rc_from_dev(dev);
domain = dca_find_domain(pci_rc);
if (list_empty(&domain->dca_providers))
dca_free_domain(domain);
spin_unlock_irqrestore(&dca_lock, flags); spin_unlock_irqrestore(&dca_lock, flags);
dca_sysfs_remove_provider(dca); dca_sysfs_remove_provider(dca);
...@@ -276,7 +372,7 @@ EXPORT_SYMBOL_GPL(dca_unregister_notify); ...@@ -276,7 +372,7 @@ EXPORT_SYMBOL_GPL(dca_unregister_notify);
static int __init dca_init(void) static int __init dca_init(void)
{ {
printk(KERN_ERR "dca service started, version %s\n", DCA_VERSION); pr_info("dca service started, version %s\n", DCA_VERSION);
return dca_sysfs_init(); return dca_sysfs_init();
} }
......
...@@ -17,11 +17,15 @@ if DMADEVICES ...@@ -17,11 +17,15 @@ if DMADEVICES
comment "DMA Devices" comment "DMA Devices"
config ASYNC_TX_DISABLE_CHANNEL_SWITCH
bool
config INTEL_IOATDMA config INTEL_IOATDMA
tristate "Intel I/OAT DMA support" tristate "Intel I/OAT DMA support"
depends on PCI && X86 depends on PCI && X86
select DMA_ENGINE select DMA_ENGINE
select DCA select DCA
select ASYNC_TX_DISABLE_CHANNEL_SWITCH
help help
Enable support for the Intel(R) I/OAT DMA engine present Enable support for the Intel(R) I/OAT DMA engine present
in recent Intel Xeon chipsets. in recent Intel Xeon chipsets.
...@@ -97,6 +101,14 @@ config TXX9_DMAC ...@@ -97,6 +101,14 @@ config TXX9_DMAC
Support the TXx9 SoC internal DMA controller. This can be Support the TXx9 SoC internal DMA controller. This can be
integrated in chips such as the Toshiba TX4927/38/39. integrated in chips such as the Toshiba TX4927/38/39.
config SH_DMAE
tristate "Renesas SuperH DMAC support"
depends on SUPERH && SH_DMA
depends on !SH_DMA_API
select DMA_ENGINE
help
Enable support for the Renesas SuperH DMA controllers.
config DMA_ENGINE config DMA_ENGINE
bool bool
...@@ -116,7 +128,7 @@ config NET_DMA ...@@ -116,7 +128,7 @@ config NET_DMA
config ASYNC_TX_DMA config ASYNC_TX_DMA
bool "Async_tx: Offload support for the async_tx api" bool "Async_tx: Offload support for the async_tx api"
depends on DMA_ENGINE && !HIGHMEM64G depends on DMA_ENGINE
help help
This allows the async_tx api to take advantage of offload engines for This allows the async_tx api to take advantage of offload engines for
memcpy, memset, xor, and raid6 p+q operations. If your platform has memcpy, memset, xor, and raid6 p+q operations. If your platform has
......
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_NET_DMA) += iovlock.o obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_DMATEST) += dmatest.o obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/
ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o obj-$(CONFIG_MV_XOR) += mv_xor.o
...@@ -10,3 +9,4 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o ...@@ -10,3 +9,4 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE) += shdma.o
...@@ -87,6 +87,7 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, ...@@ -87,6 +87,7 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
if (desc) { if (desc) {
memset(desc, 0, sizeof(struct at_desc)); memset(desc, 0, sizeof(struct at_desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan); dma_async_tx_descriptor_init(&desc->txd, chan);
/* txd.flags will be overwritten in prep functions */ /* txd.flags will be overwritten in prep functions */
desc->txd.flags = DMA_CTRL_ACK; desc->txd.flags = DMA_CTRL_ACK;
...@@ -150,11 +151,11 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) ...@@ -150,11 +151,11 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
struct at_desc *child; struct at_desc *child;
spin_lock_bh(&atchan->lock); spin_lock_bh(&atchan->lock);
list_for_each_entry(child, &desc->txd.tx_list, desc_node) list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&atchan->chan_common), dev_vdbg(chan2dev(&atchan->chan_common),
"moving child desc %p to freelist\n", "moving child desc %p to freelist\n",
child); child);
list_splice_init(&desc->txd.tx_list, &atchan->free_list); list_splice_init(&desc->tx_list, &atchan->free_list);
dev_vdbg(chan2dev(&atchan->chan_common), dev_vdbg(chan2dev(&atchan->chan_common),
"moving desc %p to freelist\n", desc); "moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &atchan->free_list); list_add(&desc->desc_node, &atchan->free_list);
...@@ -247,30 +248,33 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) ...@@ -247,30 +248,33 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
param = txd->callback_param; param = txd->callback_param;
/* move children to free_list */ /* move children to free_list */
list_splice_init(&txd->tx_list, &atchan->free_list); list_splice_init(&desc->tx_list, &atchan->free_list);
/* move myself to free_list */ /* move myself to free_list */
list_move(&desc->desc_node, &atchan->free_list); list_move(&desc->desc_node, &atchan->free_list);
/* unmap dma addresses */ /* unmap dma addresses */
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { if (!atchan->chan_common.private) {
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) struct device *parent = chan2parent(&atchan->chan_common);
dma_unmap_single(chan2parent(&atchan->chan_common), if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
desc->lli.daddr, if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
desc->len, DMA_FROM_DEVICE); dma_unmap_single(parent,
else desc->lli.daddr,
dma_unmap_page(chan2parent(&atchan->chan_common), desc->len, DMA_FROM_DEVICE);
desc->lli.daddr, else
desc->len, DMA_FROM_DEVICE); dma_unmap_page(parent,
} desc->lli.daddr,
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { desc->len, DMA_FROM_DEVICE);
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) }
dma_unmap_single(chan2parent(&atchan->chan_common), if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
desc->lli.saddr, if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
desc->len, DMA_TO_DEVICE); dma_unmap_single(parent,
else desc->lli.saddr,
dma_unmap_page(chan2parent(&atchan->chan_common), desc->len, DMA_TO_DEVICE);
desc->lli.saddr, else
desc->len, DMA_TO_DEVICE); dma_unmap_page(parent,
desc->lli.saddr,
desc->len, DMA_TO_DEVICE);
}
} }
/* /*
...@@ -334,7 +338,7 @@ static void atc_cleanup_descriptors(struct at_dma_chan *atchan) ...@@ -334,7 +338,7 @@ static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
/* This one is currently in progress */ /* This one is currently in progress */
return; return;
list_for_each_entry(child, &desc->txd.tx_list, desc_node) list_for_each_entry(child, &desc->tx_list, desc_node)
if (!(child->lli.ctrla & ATC_DONE)) if (!(child->lli.ctrla & ATC_DONE))
/* Currently in progress */ /* Currently in progress */
return; return;
...@@ -407,7 +411,7 @@ static void atc_handle_error(struct at_dma_chan *atchan) ...@@ -407,7 +411,7 @@ static void atc_handle_error(struct at_dma_chan *atchan)
dev_crit(chan2dev(&atchan->chan_common), dev_crit(chan2dev(&atchan->chan_common),
" cookie: %d\n", bad_desc->txd.cookie); " cookie: %d\n", bad_desc->txd.cookie);
atc_dump_lli(atchan, &bad_desc->lli); atc_dump_lli(atchan, &bad_desc->lli);
list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) list_for_each_entry(child, &bad_desc->tx_list, desc_node)
atc_dump_lli(atchan, &child->lli); atc_dump_lli(atchan, &child->lli);
/* Pretend the descriptor completed successfully */ /* Pretend the descriptor completed successfully */
...@@ -587,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, ...@@ -587,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
prev->lli.dscr = desc->txd.phys; prev->lli.dscr = desc->txd.phys;
/* insert the link descriptor to the LD ring */ /* insert the link descriptor to the LD ring */
list_add_tail(&desc->desc_node, list_add_tail(&desc->desc_node,
&first->txd.tx_list); &first->tx_list);
} }
prev = desc; prev = desc;
} }
...@@ -646,8 +650,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -646,8 +650,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg_width = atslave->reg_width; reg_width = atslave->reg_width;
sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN; ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
...@@ -687,7 +689,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -687,7 +689,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
prev->lli.dscr = desc->txd.phys; prev->lli.dscr = desc->txd.phys;
/* insert the link descriptor to the LD ring */ /* insert the link descriptor to the LD ring */
list_add_tail(&desc->desc_node, list_add_tail(&desc->desc_node,
&first->txd.tx_list); &first->tx_list);
} }
prev = desc; prev = desc;
total_len += len; total_len += len;
...@@ -729,7 +731,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -729,7 +731,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
prev->lli.dscr = desc->txd.phys; prev->lli.dscr = desc->txd.phys;
/* insert the link descriptor to the LD ring */ /* insert the link descriptor to the LD ring */
list_add_tail(&desc->desc_node, list_add_tail(&desc->desc_node,
&first->txd.tx_list); &first->tx_list);
} }
prev = desc; prev = desc;
total_len += len; total_len += len;
......
...@@ -165,6 +165,7 @@ struct at_desc { ...@@ -165,6 +165,7 @@ struct at_desc {
struct at_lli lli; struct at_lli lli;
/* THEN values for driver housekeeping */ /* THEN values for driver housekeeping */
struct list_head tx_list;
struct dma_async_tx_descriptor txd; struct dma_async_tx_descriptor txd;
struct list_head desc_node; struct list_head desc_node;
size_t len; size_t len;
......
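Several drivers in this merge (at_hdmac above, dw_dmac, fsldma and mv_xor below) move the list of chained child descriptors out of struct dma_async_tx_descriptor and into their own descriptor, and dmaengine.c stops initializing txd.tx_list. A minimal sketch of the resulting pattern; struct example_desc and its helpers are made up for illustration, only the dmaengine calls are real:

#include <linux/dmaengine.h>
#include <linux/list.h>

/* hypothetical driver descriptor: the child list now lives in the driver's
 * own structure rather than in txd.tx_list */
struct example_desc {
	struct list_head		tx_list;	/* children of a multi-descriptor transfer */
	struct list_head		desc_node;	/* free/active list linkage */
	struct dma_async_tx_descriptor	txd;
};

static void example_desc_init(struct example_desc *desc, struct dma_chan *chan)
{
	INIT_LIST_HEAD(&desc->tx_list);		/* no longer done by the core */
	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.flags = DMA_CTRL_ACK;
}

/* when building a long transfer, children hang off the first descriptor */
static void example_desc_chain(struct example_desc *first, struct example_desc *child)
{
	list_add_tail(&child->desc_node, &first->tx_list);
}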
...@@ -608,6 +608,40 @@ void dmaengine_put(void) ...@@ -608,6 +608,40 @@ void dmaengine_put(void)
} }
EXPORT_SYMBOL(dmaengine_put); EXPORT_SYMBOL(dmaengine_put);
static bool device_has_all_tx_types(struct dma_device *device)
{
/* A device that satisfies this test has channels that will never cause
* an async_tx channel switch event as all possible operation types can
* be handled.
*/
#ifdef CONFIG_ASYNC_TX_DMA
if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
return false;
#endif
#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
return false;
#endif
#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
return false;
#endif
#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
if (!dma_has_cap(DMA_XOR, device->cap_mask))
return false;
#endif
#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
if (!dma_has_cap(DMA_PQ, device->cap_mask))
return false;
#endif
return true;
}
static int get_dma_id(struct dma_device *device) static int get_dma_id(struct dma_device *device)
{ {
int rc; int rc;
...@@ -644,8 +678,12 @@ int dma_async_device_register(struct dma_device *device) ...@@ -644,8 +678,12 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_memcpy); !device->device_prep_dma_memcpy);
BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) && BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
!device->device_prep_dma_xor); !device->device_prep_dma_xor);
BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) && BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
!device->device_prep_dma_zero_sum); !device->device_prep_dma_xor_val);
BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
!device->device_prep_dma_pq);
BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
!device->device_prep_dma_pq_val);
BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
!device->device_prep_dma_memset); !device->device_prep_dma_memset);
BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
...@@ -661,6 +699,12 @@ int dma_async_device_register(struct dma_device *device) ...@@ -661,6 +699,12 @@ int dma_async_device_register(struct dma_device *device)
BUG_ON(!device->device_issue_pending); BUG_ON(!device->device_issue_pending);
BUG_ON(!device->dev); BUG_ON(!device->dev);
/* note: this only matters in the
* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
*/
if (device_has_all_tx_types(device))
dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
if (!idr_ref) if (!idr_ref)
return -ENOMEM; return -ENOMEM;
...@@ -933,55 +977,29 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, ...@@ -933,55 +977,29 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
{ {
tx->chan = chan; tx->chan = chan;
spin_lock_init(&tx->lock); spin_lock_init(&tx->lock);
INIT_LIST_HEAD(&tx->tx_list);
} }
EXPORT_SYMBOL(dma_async_tx_descriptor_init); EXPORT_SYMBOL(dma_async_tx_descriptor_init);
/* dma_wait_for_async_tx - spin wait for a transaction to complete /* dma_wait_for_async_tx - spin wait for a transaction to complete
* @tx: in-flight transaction to wait on * @tx: in-flight transaction to wait on
*
* This routine assumes that tx was obtained from a call to async_memcpy,
* async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
* and submitted). Walking the parent chain is only meant to cover for DMA
* drivers that do not implement the DMA_INTERRUPT capability and may race with
* the driver's descriptor cleanup routine.
*/ */
enum dma_status enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{ {
enum dma_status status; unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
struct dma_async_tx_descriptor *iter;
struct dma_async_tx_descriptor *parent;
if (!tx) if (!tx)
return DMA_SUCCESS; return DMA_SUCCESS;
WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for" while (tx->cookie == -EBUSY) {
" %s\n", __func__, dma_chan_name(tx->chan)); if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
pr_err("%s timeout waiting for descriptor submission\n",
/* poll through the dependency chain, return when tx is complete */ __func__);
do { return DMA_ERROR;
iter = tx; }
cpu_relax();
/* find the root of the unsubmitted dependency chain */ }
do { return dma_sync_wait(tx->chan, tx->cookie);
parent = iter->parent;
if (!parent)
break;
else
iter = parent;
} while (parent);
/* there is a small window for ->parent == NULL and
* ->cookie == -EBUSY
*/
while (iter->cookie == -EBUSY)
cpu_relax();
status = dma_sync_wait(iter->chan, iter->cookie);
} while (status == DMA_IN_PROGRESS || (iter != tx));
return status;
} }
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
......
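The dmaengine.c hunks above add device_has_all_tx_types() so channel switching can be disabled when one device covers every async_tx operation, extend the capability sanity checks to PQ/PQ_VAL, and rewrite dma_wait_for_async_tx(): instead of walking the parent chain it assumes the descriptor is already in flight, spins only on the short cookie == -EBUSY submission window (with a 5 second timeout), then falls through to dma_sync_wait(). A hedged sketch of the intended call pattern, assuming the async_submit_ctl based API from the documentation at the top of this commit; buffer setup is omitted:

#include <linux/async_tx.h>
#include <linux/dmaengine.h>

/* sketch: submit an offloaded copy and synchronously wait for it */
static int example_copy_and_wait(struct page *dest, struct page *src, size_t len)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
	tx = async_memcpy(dest, src, 0, 0, len, &submit);

	/* a NULL return means the copy was performed synchronously in software */
	if (tx && dma_wait_for_async_tx(tx) != DMA_SUCCESS)
		return -EIO;	/* DMA_ERROR: submission or completion timed out */
	return 0;
}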
...@@ -48,6 +48,11 @@ module_param(xor_sources, uint, S_IRUGO); ...@@ -48,6 +48,11 @@ module_param(xor_sources, uint, S_IRUGO);
MODULE_PARM_DESC(xor_sources, MODULE_PARM_DESC(xor_sources,
"Number of xor source buffers (default: 3)"); "Number of xor source buffers (default: 3)");
static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO);
MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
/* /*
* Initialization patterns. All bytes in the source buffer has bit 7 * Initialization patterns. All bytes in the source buffer has bit 7
* set, all bytes in the destination buffer has bit 7 cleared. * set, all bytes in the destination buffer has bit 7 cleared.
...@@ -232,6 +237,7 @@ static int dmatest_func(void *data) ...@@ -232,6 +237,7 @@ static int dmatest_func(void *data)
dma_cookie_t cookie; dma_cookie_t cookie;
enum dma_status status; enum dma_status status;
enum dma_ctrl_flags flags; enum dma_ctrl_flags flags;
u8 pq_coefs[pq_sources];
int ret; int ret;
int src_cnt; int src_cnt;
int dst_cnt; int dst_cnt;
...@@ -248,6 +254,11 @@ static int dmatest_func(void *data) ...@@ -248,6 +254,11 @@ static int dmatest_func(void *data)
else if (thread->type == DMA_XOR) { else if (thread->type == DMA_XOR) {
src_cnt = xor_sources | 1; /* force odd to ensure dst = src */ src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
dst_cnt = 1; dst_cnt = 1;
} else if (thread->type == DMA_PQ) {
src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
dst_cnt = 2;
for (i = 0; i < pq_sources; i++)
pq_coefs[i] = 1;
} else } else
goto err_srcs; goto err_srcs;
...@@ -283,6 +294,7 @@ static int dmatest_func(void *data) ...@@ -283,6 +294,7 @@ static int dmatest_func(void *data)
dma_addr_t dma_dsts[dst_cnt]; dma_addr_t dma_dsts[dst_cnt];
struct completion cmp; struct completion cmp;
unsigned long tmo = msecs_to_jiffies(3000); unsigned long tmo = msecs_to_jiffies(3000);
u8 align = 0;
total_tests++; total_tests++;
...@@ -290,6 +302,18 @@ static int dmatest_func(void *data) ...@@ -290,6 +302,18 @@ static int dmatest_func(void *data)
src_off = dmatest_random() % (test_buf_size - len + 1); src_off = dmatest_random() % (test_buf_size - len + 1);
dst_off = dmatest_random() % (test_buf_size - len + 1); dst_off = dmatest_random() % (test_buf_size - len + 1);
/* honor alignment restrictions */
if (thread->type == DMA_MEMCPY)
align = dev->copy_align;
else if (thread->type == DMA_XOR)
align = dev->xor_align;
else if (thread->type == DMA_PQ)
align = dev->pq_align;
len = (len >> align) << align;
src_off = (src_off >> align) << align;
dst_off = (dst_off >> align) << align;
dmatest_init_srcs(thread->srcs, src_off, len); dmatest_init_srcs(thread->srcs, src_off, len);
dmatest_init_dsts(thread->dsts, dst_off, len); dmatest_init_dsts(thread->dsts, dst_off, len);
...@@ -306,6 +330,7 @@ static int dmatest_func(void *data) ...@@ -306,6 +330,7 @@ static int dmatest_func(void *data)
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
} }
if (thread->type == DMA_MEMCPY) if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan, tx = dev->device_prep_dma_memcpy(chan,
dma_dsts[0] + dst_off, dma_dsts[0] + dst_off,
...@@ -316,6 +341,15 @@ static int dmatest_func(void *data) ...@@ -316,6 +341,15 @@ static int dmatest_func(void *data)
dma_dsts[0] + dst_off, dma_dsts[0] + dst_off,
dma_srcs, xor_sources, dma_srcs, xor_sources,
len, flags); len, flags);
else if (thread->type == DMA_PQ) {
dma_addr_t dma_pq[dst_cnt];
for (i = 0; i < dst_cnt; i++)
dma_pq[i] = dma_dsts[i] + dst_off;
tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
pq_sources, pq_coefs,
len, flags);
}
if (!tx) { if (!tx) {
for (i = 0; i < src_cnt; i++) for (i = 0; i < src_cnt; i++)
...@@ -459,6 +493,8 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty ...@@ -459,6 +493,8 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty
op = "copy"; op = "copy";
else if (type == DMA_XOR) else if (type == DMA_XOR)
op = "xor"; op = "xor";
else if (type == DMA_PQ)
op = "pq";
else else
return -EINVAL; return -EINVAL;
...@@ -514,6 +550,10 @@ static int dmatest_add_channel(struct dma_chan *chan) ...@@ -514,6 +550,10 @@ static int dmatest_add_channel(struct dma_chan *chan)
cnt = dmatest_add_threads(dtc, DMA_XOR); cnt = dmatest_add_threads(dtc, DMA_XOR);
thread_count += cnt > 0 ? cnt : 0; thread_count += cnt > 0 ? cnt : 0;
} }
if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
cnt = dmatest_add_threads(dtc, DMA_PQ);
thread_count += cnt > 0 ?: 0;
}
pr_info("dmatest: Started %u threads using %s\n", pr_info("dmatest: Started %u threads using %s\n",
thread_count, dma_chan_name(chan)); thread_count, dma_chan_name(chan));
......
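The dmatest hunks above exercise the new DMA_PQ capability: two destinations (P and Q), one coefficient per source, and offsets/lengths rounded down to the device's pq_align. A condensed sketch of the prep call the test ends up making; everything except the dmaengine API and the pq_align/device_prep_dma_pq fields shown above is hypothetical, and src_cnt is assumed to fit the coefficient array:

#include <linux/dmaengine.h>

/* sketch: prepare a P+Q (raid6 syndrome) descriptor the way dmatest does,
 * with two mapped destinations and a coefficient of 1 for every source */
static struct dma_async_tx_descriptor *
example_prep_pq(struct dma_chan *chan, dma_addr_t pq[2],
		dma_addr_t *srcs, int src_cnt, size_t len)
{
	struct dma_device *dev = chan->device;
	unsigned char coefs[16];		/* assumes src_cnt <= 16 */
	int i;

	for (i = 0; i < src_cnt; i++)
		coefs[i] = 1;

	/* honor the device's P+Q alignment requirement */
	len = (len >> dev->pq_align) << dev->pq_align;

	return dev->device_prep_dma_pq(chan, pq, srcs, src_cnt, coefs, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}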
...@@ -116,7 +116,7 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) ...@@ -116,7 +116,7 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{ {
struct dw_desc *child; struct dw_desc *child;
list_for_each_entry(child, &desc->txd.tx_list, desc_node) list_for_each_entry(child, &desc->tx_list, desc_node)
dma_sync_single_for_cpu(chan2parent(&dwc->chan), dma_sync_single_for_cpu(chan2parent(&dwc->chan),
child->txd.phys, sizeof(child->lli), child->txd.phys, sizeof(child->lli),
DMA_TO_DEVICE); DMA_TO_DEVICE);
...@@ -137,11 +137,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) ...@@ -137,11 +137,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
dwc_sync_desc_for_cpu(dwc, desc); dwc_sync_desc_for_cpu(dwc, desc);
spin_lock_bh(&dwc->lock); spin_lock_bh(&dwc->lock);
list_for_each_entry(child, &desc->txd.tx_list, desc_node) list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&dwc->chan), dev_vdbg(chan2dev(&dwc->chan),
"moving child desc %p to freelist\n", "moving child desc %p to freelist\n",
child); child);
list_splice_init(&desc->txd.tx_list, &dwc->free_list); list_splice_init(&desc->tx_list, &dwc->free_list);
dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &dwc->free_list); list_add(&desc->desc_node, &dwc->free_list);
spin_unlock_bh(&dwc->lock); spin_unlock_bh(&dwc->lock);
...@@ -209,19 +209,28 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) ...@@ -209,19 +209,28 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
param = txd->callback_param; param = txd->callback_param;
dwc_sync_desc_for_cpu(dwc, desc); dwc_sync_desc_for_cpu(dwc, desc);
list_splice_init(&txd->tx_list, &dwc->free_list); list_splice_init(&desc->tx_list, &dwc->free_list);
list_move(&desc->desc_node, &dwc->free_list); list_move(&desc->desc_node, &dwc->free_list);
/* if (!dwc->chan.private) {
* We use dma_unmap_page() regardless of how the buffers were struct device *parent = chan2parent(&dwc->chan);
* mapped before they were submitted... if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
*/ if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) dma_unmap_single(parent, desc->lli.dar,
dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar, desc->len, DMA_FROM_DEVICE);
desc->len, DMA_FROM_DEVICE); else
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) dma_unmap_page(parent, desc->lli.dar,
dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar, desc->len, DMA_FROM_DEVICE);
desc->len, DMA_TO_DEVICE); }
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(parent, desc->lli.sar,
desc->len, DMA_TO_DEVICE);
else
dma_unmap_page(parent, desc->lli.sar,
desc->len, DMA_TO_DEVICE);
}
}
/* /*
* The API requires that no submissions are done from a * The API requires that no submissions are done from a
...@@ -289,7 +298,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) ...@@ -289,7 +298,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
/* This one is currently in progress */ /* This one is currently in progress */
return; return;
list_for_each_entry(child, &desc->txd.tx_list, desc_node) list_for_each_entry(child, &desc->tx_list, desc_node)
if (child->lli.llp == llp) if (child->lli.llp == llp)
/* Currently in progress */ /* Currently in progress */
return; return;
...@@ -356,7 +365,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) ...@@ -356,7 +365,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dev_printk(KERN_CRIT, chan2dev(&dwc->chan), dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
" cookie: %d\n", bad_desc->txd.cookie); " cookie: %d\n", bad_desc->txd.cookie);
dwc_dump_lli(dwc, &bad_desc->lli); dwc_dump_lli(dwc, &bad_desc->lli);
list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) list_for_each_entry(child, &bad_desc->tx_list, desc_node)
dwc_dump_lli(dwc, &child->lli); dwc_dump_lli(dwc, &child->lli);
/* Pretend the descriptor completed successfully */ /* Pretend the descriptor completed successfully */
...@@ -608,7 +617,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, ...@@ -608,7 +617,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
prev->txd.phys, sizeof(prev->lli), prev->txd.phys, sizeof(prev->lli),
DMA_TO_DEVICE); DMA_TO_DEVICE);
list_add_tail(&desc->desc_node, list_add_tail(&desc->desc_node,
&first->txd.tx_list); &first->tx_list);
} }
prev = desc; prev = desc;
} }
...@@ -658,8 +667,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -658,8 +667,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg_width = dws->reg_width; reg_width = dws->reg_width;
prev = first = NULL; prev = first = NULL;
sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
switch (direction) { switch (direction) {
case DMA_TO_DEVICE: case DMA_TO_DEVICE:
ctllo = (DWC_DEFAULT_CTLLO ctllo = (DWC_DEFAULT_CTLLO
...@@ -700,7 +707,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -700,7 +707,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
sizeof(prev->lli), sizeof(prev->lli),
DMA_TO_DEVICE); DMA_TO_DEVICE);
list_add_tail(&desc->desc_node, list_add_tail(&desc->desc_node,
&first->txd.tx_list); &first->tx_list);
} }
prev = desc; prev = desc;
total_len += len; total_len += len;
...@@ -746,7 +753,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -746,7 +753,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
sizeof(prev->lli), sizeof(prev->lli),
DMA_TO_DEVICE); DMA_TO_DEVICE);
list_add_tail(&desc->desc_node, list_add_tail(&desc->desc_node,
&first->txd.tx_list); &first->tx_list);
} }
prev = desc; prev = desc;
total_len += len; total_len += len;
...@@ -902,6 +909,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) ...@@ -902,6 +909,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
break; break;
} }
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan); dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = dwc_tx_submit; desc->txd.tx_submit = dwc_tx_submit;
desc->txd.flags = DMA_CTRL_ACK; desc->txd.flags = DMA_CTRL_ACK;
......
...@@ -217,6 +217,7 @@ struct dw_desc { ...@@ -217,6 +217,7 @@ struct dw_desc {
/* THEN values for driver housekeeping */ /* THEN values for driver housekeeping */
struct list_head desc_node; struct list_head desc_node;
struct list_head tx_list;
struct dma_async_tx_descriptor txd; struct dma_async_tx_descriptor txd;
size_t len; size_t len;
}; };
......
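As in at_hdmac, dw_dmac now skips the completion-time unmap entirely on slave channels (chan->private set) and otherwise honors the SINGLE vs PAGE unmap flags on both source and destination. Those flags are chosen by the client when it preps the transfer; a brief sketch of a client that maps its own buffers and therefore asks the driver to skip the unmap (the wrapper itself is illustrative):

#include <linux/dmaengine.h>

/* sketch: the client keeps ownership of its DMA mappings, so it sets the
 * SKIP_*_UNMAP flags and unmaps in its own completion path */
static dma_cookie_t example_issue_copy(struct dma_chan *chan,
				       dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_device *dev = chan->device;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK |
				    DMA_COMPL_SKIP_SRC_UNMAP |
				    DMA_COMPL_SKIP_DEST_UNMAP;
	struct dma_async_tx_descriptor *tx;

	tx = dev->device_prep_dma_memcpy(chan, dst, src, len, flags);
	if (!tx)
		return -ENOMEM;		/* no descriptor available right now */

	return tx->tx_submit(tx);
}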
...@@ -90,6 +90,7 @@ struct fsl_dma_ld_hw { ...@@ -90,6 +90,7 @@ struct fsl_dma_ld_hw {
struct fsl_desc_sw { struct fsl_desc_sw {
struct fsl_dma_ld_hw hw; struct fsl_dma_ld_hw hw;
struct list_head node; struct list_head node;
struct list_head tx_list;
struct dma_async_tx_descriptor async_tx; struct dma_async_tx_descriptor async_tx;
struct list_head *ld; struct list_head *ld;
void *priv; void *priv;
...@@ -143,10 +144,11 @@ struct fsl_dma_chan { ...@@ -143,10 +144,11 @@ struct fsl_dma_chan {
struct tasklet_struct tasklet; struct tasklet_struct tasklet;
u32 feature; u32 feature;
void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size); void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable);
void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size);
}; };
#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) #define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
......
obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
ioatdma-objs := pci.o dma.o dma_v2.o dma_v3.o dca.o
...@@ -33,8 +33,8 @@ ...@@ -33,8 +33,8 @@
#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24) #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
#endif #endif
#include "ioatdma.h" #include "dma.h"
#include "ioatdma_registers.h" #include "registers.h"
/* /*
* Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
...@@ -242,7 +242,8 @@ static struct dca_ops ioat_dca_ops = { ...@@ -242,7 +242,8 @@ static struct dca_ops ioat_dca_ops = {
}; };
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) struct dca_provider * __devinit
ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{ {
struct dca_provider *dca; struct dca_provider *dca;
struct ioat_dca_priv *ioatdca; struct ioat_dca_priv *ioatdca;
...@@ -407,7 +408,8 @@ static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) ...@@ -407,7 +408,8 @@ static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
return slots; return slots;
} }
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) struct dca_provider * __devinit
ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{ {
struct dca_provider *dca; struct dca_provider *dca;
struct ioat_dca_priv *ioatdca; struct ioat_dca_priv *ioatdca;
...@@ -602,7 +604,8 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) ...@@ -602,7 +604,8 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
return slots; return slots;
} }
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) struct dca_provider * __devinit
ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{ {
struct dca_provider *dca; struct dca_provider *dca;
struct ioat_dca_priv *ioatdca; struct ioat_dca_priv *ioatdca;
......
...@@ -64,18 +64,37 @@ ...@@ -64,18 +64,37 @@
#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ #define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */
#define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001 #define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001
#define IOAT_DEVICE_MMIO_RESTRICTED 0x0002
#define IOAT_DEVICE_MEMORY_BYPASS 0x0004
#define IOAT_DEVICE_ADDRESS_REMAPPING 0x0008
#define IOAT_DMA_CAP_OFFSET 0x10 /* 32-bit */
#define IOAT_CAP_PAGE_BREAK 0x00000001
#define IOAT_CAP_CRC 0x00000002
#define IOAT_CAP_SKIP_MARKER 0x00000004
#define IOAT_CAP_DCA 0x00000010
#define IOAT_CAP_CRC_MOVE 0x00000020
#define IOAT_CAP_FILL_BLOCK 0x00000040
#define IOAT_CAP_APIC 0x00000080
#define IOAT_CAP_XOR 0x00000100
#define IOAT_CAP_PQ 0x00000200
#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */
/* DMA Channel Registers */ /* DMA Channel Registers */
#define IOAT_CHANCTRL_OFFSET 0x00 /* 16-bit Channel Control Register */ #define IOAT_CHANCTRL_OFFSET 0x00 /* 16-bit Channel Control Register */
#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000 #define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000
#define IOAT3_CHANCTRL_COMPL_DCA_EN 0x0200
#define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100 #define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100
#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020 #define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020
#define IOAT_CHANCTRL_ERR_INT_EN 0x0010 #define IOAT_CHANCTRL_ERR_INT_EN 0x0010
#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008 #define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008
#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 #define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
#define IOAT_CHANCTRL_INT_DISABLE 0x0001 #define IOAT_CHANCTRL_INT_REARM 0x0001
#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\
IOAT_CHANCTRL_ERR_COMPLETION_EN |\
IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\
IOAT_CHANCTRL_ERR_INT_EN)
#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ #define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ #define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
...@@ -94,14 +113,14 @@ ...@@ -94,14 +113,14 @@
#define IOAT2_CHANSTS_OFFSET_HIGH 0x0C #define IOAT2_CHANSTS_OFFSET_HIGH 0x0C
#define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ #define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \
? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH) ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH)
#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F #define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL)
#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010 #define IOAT_CHANSTS_SOFT_ERR 0x10ULL
#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x0000000000000008 #define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007 #define IOAT_CHANSTS_STATUS 0x7ULL
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0 #define IOAT_CHANSTS_ACTIVE 0x0
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1 #define IOAT_CHANSTS_DONE 0x1
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2 #define IOAT_CHANSTS_SUSPENDED 0x2
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3 #define IOAT_CHANSTS_HALTED 0x3
...@@ -204,22 +223,27 @@ ...@@ -204,22 +223,27 @@
#define IOAT_CDAR_OFFSET_HIGH 0x24 #define IOAT_CDAR_OFFSET_HIGH 0x24
#define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */ #define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */
#define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR 0x0001 #define IOAT_CHANERR_SRC_ADDR_ERR 0x0001
#define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR 0x0002 #define IOAT_CHANERR_DEST_ADDR_ERR 0x0002
#define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR 0x0004 #define IOAT_CHANERR_NEXT_ADDR_ERR 0x0004
#define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR 0x0008 #define IOAT_CHANERR_NEXT_DESC_ALIGN_ERR 0x0008
#define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010 #define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010
#define IOAT_CHANERR_CHANCMD_ERR 0x0020 #define IOAT_CHANERR_CHANCMD_ERR 0x0020
#define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040 #define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040
#define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080 #define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080
#define IOAT_CHANERR_READ_DATA_ERR 0x0100 #define IOAT_CHANERR_READ_DATA_ERR 0x0100
#define IOAT_CHANERR_WRITE_DATA_ERR 0x0200 #define IOAT_CHANERR_WRITE_DATA_ERR 0x0200
#define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR 0x0400 #define IOAT_CHANERR_CONTROL_ERR 0x0400
#define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR 0x0800 #define IOAT_CHANERR_LENGTH_ERR 0x0800
#define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000 #define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000
#define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 #define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000
#define IOAT_CHANERR_SOFT_ERR 0x4000 #define IOAT_CHANERR_SOFT_ERR 0x4000
#define IOAT_CHANERR_UNAFFILIATED_ERR 0x8000 #define IOAT_CHANERR_UNAFFILIATED_ERR 0x8000
#define IOAT_CHANERR_XOR_P_OR_CRC_ERR 0x10000
#define IOAT_CHANERR_XOR_Q_ERR 0x20000
#define IOAT_CHANERR_DESCRIPTOR_COUNT_ERR 0x40000
#define IOAT_CHANERR_HANDLE_MASK (IOAT_CHANERR_XOR_P_OR_CRC_ERR | IOAT_CHANERR_XOR_Q_ERR)
#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ #define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
......
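The register header above renames the channel status/error bit definitions, adds the DMA capability bits (XOR, PQ, etc.), and introduces IOAT_CHANERR_HANDLE_MASK: the subset of channel errors a v3 channel reports for a failed P/Q or CRC validate rather than for a hard fault. A hedged sketch of the kind of check this enables; the helper and its reg_base argument are assumptions, only the register constants come from the header:

/* constants come from the driver-private registers.h shown above */
#include <linux/io.h>
#include <linux/types.h>

/* hypothetical helper: treat a pure validate-mismatch result as handled,
 * anything outside IOAT_CHANERR_HANDLE_MASK as a real channel fault */
static bool example_chanerr_is_handled(void __iomem *reg_base)
{
	u32 chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);

	return chanerr && !(chanerr & ~IOAT_CHANERR_HANDLE_MASK);
}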
...@@ -183,6 +183,11 @@ dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, ...@@ -183,6 +183,11 @@ dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
iov_byte_offset, iov_byte_offset,
kdata, kdata,
copy); copy);
/* poll for a descriptor slot */
if (unlikely(dma_cookie < 0)) {
dma_async_issue_pending(chan);
continue;
}
len -= copy; len -= copy;
iov[iovec_idx].iov_len -= copy; iov[iovec_idx].iov_len -= copy;
...@@ -248,6 +253,11 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, ...@@ -248,6 +253,11 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
page, page,
offset, offset,
copy); copy);
/* poll for a descriptor slot */
if (unlikely(dma_cookie < 0)) {
dma_async_issue_pending(chan);
continue;
}
len -= copy; len -= copy;
iov[iovec_idx].iov_len -= copy; iov[iovec_idx].iov_len -= copy;
......
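The iovlock.c hunks add a retry path: when the copy helpers hand back a negative cookie (no descriptor slot free), they call dma_async_issue_pending() to flush the channel and try the same chunk again. The same pattern in isolation; dma_async_memcpy_buf_to_buf() and dma_async_issue_pending() are existing dmaengine helpers, the wrapper is illustrative and a real caller would likely bound the retries:

#include <linux/dmaengine.h>

/* sketch: poll for a descriptor slot by kicking the engine so in-flight
 * work completes and frees descriptors */
static dma_cookie_t example_copy_retry(struct dma_chan *chan,
				       void *dst, void *src, size_t len)
{
	dma_cookie_t cookie;

	for (;;) {
		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
		if (likely(cookie >= 0))
			return cookie;

		/* no descriptor slot: push pending work and poll again */
		dma_async_issue_pending(chan);
	}
}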
...@@ -126,9 +126,8 @@ struct mv_xor_chan { ...@@ -126,9 +126,8 @@ struct mv_xor_chan {
* @idx: pool index * @idx: pool index
* @unmap_src_cnt: number of xor sources * @unmap_src_cnt: number of xor sources
* @unmap_len: transaction bytecount * @unmap_len: transaction bytecount
* @tx_list: list of slots that make up a multi-descriptor transaction
* @async_tx: support for the async_tx api * @async_tx: support for the async_tx api
* @group_list: list of slots that make up a multi-descriptor transaction
* for example transfer lengths larger than the supported hw max
* @xor_check_result: result of zero sum * @xor_check_result: result of zero sum
* @crc32_result: result crc calculation * @crc32_result: result crc calculation
*/ */
...@@ -145,6 +144,7 @@ struct mv_xor_desc_slot { ...@@ -145,6 +144,7 @@ struct mv_xor_desc_slot {
u16 unmap_src_cnt; u16 unmap_src_cnt;
u32 value; u32 value;
size_t unmap_len; size_t unmap_len;
struct list_head tx_list;
struct dma_async_tx_descriptor async_tx; struct dma_async_tx_descriptor async_tx;
union { union {
u32 *xor_check_result; u32 *xor_check_result;
......