Commit f19624aa authored by Or Gerlitz, committed by Roland Dreier

IB/iser: Simplify send flow/descriptors

Simplify and shrink the logic/code used for the send descriptors.
Changes include removing struct iser_dto (an unnecessary abstraction),
using struct iser_regd_buf only for handling SCSI commands, using
dma_sync instead of dma_map/unmap, etc.
Signed-off-by: Or Gerlitz <ogerlitz@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 78ad0a34
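Before the hunks, a sketch of the scheme the commit moves to. Previously every send DMA-mapped its header buffer around each post and unmapped it on completion; now a task descriptor's headers are mapped once, and each post merely syncs them for the device. The ib_dma_* helpers below are the real kernel APIs; the two wrapper functions themselves are illustrative only, not code from this patch:

/* Illustrative sketch of the one-time-map / per-send-sync pattern.
 * iser_tx_desc, dma_addr and ISER_HEADERS_LEN come from the patch;
 * these wrapper functions are hypothetical. */
static int sketch_map_headers_once(struct ib_device *dev,
				   struct iser_tx_desc *tx_desc)
{
	/* done once, when the task is initialized */
	tx_desc->dma_addr = ib_dma_map_single(dev, tx_desc,
					      ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, tx_desc->dma_addr))
		return -ENOMEM;
	return 0;
}

static void sketch_sync_before_post(struct ib_device *dev,
				    struct iser_tx_desc *tx_desc)
{
	/* done on every send: flush CPU writes to the headers so the
	 * HCA sees them -- no map/unmap on the fast path */
	ib_dma_sync_single_for_device(dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);
}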
@@ -128,6 +128,28 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
return 0;
}
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc)
{
struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->ib_conn->device;
struct iscsi_iser_task *iser_task = task->dd_data;
u64 dma_addr;
dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
if (ib_dma_mapping_error(device->ib_device, dma_addr))
return -ENOMEM;
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
tx_desc->tx_sg[0].lkey = device->mr->lkey;
iser_task->headers_initialized = 1;
iser_task->iser_conn = iser_conn;
return 0;
}
/**
* iscsi_iser_task_init - Initialize task
* @task: iscsi task
@@ -137,17 +159,17 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
static int
iscsi_iser_task_init(struct iscsi_task *task)
{
struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
if (!iser_task->headers_initialized)
if (iser_initialize_task_headers(task, &iser_task->desc))
return -ENOMEM;
/* mgmt task */
if (!task->sc) {
iser_task->desc.data = task->data;
if (!task->sc)
return 0;
}
iser_task->command_sent = 0;
iser_task->iser_conn = iser_conn;
iser_task_rdma_init(iser_task);
return 0;
}
@@ -675,7 +697,7 @@ static int __init iser_init(void)
memset(&ig, 0, sizeof(struct iser_global));
ig.desc_cache = kmem_cache_create("iser_descriptors",
sizeof (struct iser_desc),
sizeof(struct iser_tx_desc),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (ig.desc_cache == NULL)
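Because the cache now holds struct iser_tx_desc, descriptors for unsolicited data-out and similar control PDUs can be carved straight from it. A minimal sketch of that allocation pattern, assuming the driver's global ig.desc_cache from the hunk above (kmem_cache_zalloc()/kmem_cache_free() are the standard slab APIs; the helpers here are hypothetical, not driver code):

/* Hypothetical helpers showing the slab usage; not from the patch. */
static struct iser_tx_desc *sketch_alloc_tx_desc(void)
{
	/* zeroed so the iSER/iSCSI headers start out clean */
	return kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
}

static void sketch_free_tx_desc(struct iser_tx_desc *tx_desc)
{
	kmem_cache_free(ig.desc_cache, tx_desc);
}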
@@ -193,28 +193,8 @@ struct iser_regd_buf {
struct iser_mem_reg reg; /* memory registration info */
void *virt_addr;
struct iser_device *device; /* device->device for dma_unmap */
u64 dma_addr; /* if non zero, addr for dma_unmap */
enum dma_data_direction direction; /* direction for dma_unmap */
unsigned int data_size;
atomic_t ref_count; /* refcount, freed when dec to 0 */
};
#define MAX_REGD_BUF_VECTOR_LEN 2
struct iser_dto {
struct iscsi_iser_task *task;
struct iser_conn *ib_conn;
int notify_enable;
/* vector of registered buffers */
unsigned int regd_vector_len;
struct iser_regd_buf *regd[MAX_REGD_BUF_VECTOR_LEN];
/* offset into the registered buffer may be specified */
unsigned int offset[MAX_REGD_BUF_VECTOR_LEN];
/* a smaller size may be specified, if 0, then full size is used */
unsigned int used_sz[MAX_REGD_BUF_VECTOR_LEN];
};
enum iser_desc_type {
@@ -223,14 +203,15 @@ enum iser_desc_type {
ISCSI_TX_DATAOUT
};
struct iser_desc {
struct iser_tx_desc {
struct iser_hdr iser_header;
struct iscsi_hdr iscsi_header;
struct iser_regd_buf hdr_regd_buf;
void *data; /* used by RX & TX_CONTROL */
struct iser_regd_buf data_regd_buf; /* used by RX & TX_CONTROL */
enum iser_desc_type type;
struct iser_dto dto;
u64 dma_addr;
/* sg[0] points to iser/iscsi headers, sg[1] optionally points to either
of immediate data, unsolicited data-out or control (login,text) */
struct ib_sge tx_sg[2];
int num_sge;
};
#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
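The two-entry tx_sg array replaces the per-post IOV that used to be rebuilt from the DTO: sg[0] always carries the mapped headers, and sg[1] is attached only when a payload rides along. An illustrative helper (hypothetical, not from the patch) showing how the second SGE might be hooked up:

/* Hypothetical illustration: attach immediate data as the second SGE.
 * The iser_tx_desc and struct ib_sge fields are real; this helper is not. */
static void sketch_attach_data_sge(struct iser_tx_desc *tx_desc,
				   u64 data_dma, unsigned int len, u32 lkey)
{
	tx_desc->tx_sg[1].addr   = data_dma;
	tx_desc->tx_sg[1].length = len;
	tx_desc->tx_sg[1].lkey   = lkey;
	tx_desc->num_sge = 2;		/* headers + data */
}

A fixed two-entry list is also why max_send_sge becomes a literal 2 in iser_create_ib_conn_res() further down.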
@@ -287,7 +268,7 @@ struct iscsi_iser_conn {
};
struct iscsi_iser_task {
struct iser_desc desc;
struct iser_tx_desc desc;
struct iscsi_iser_conn *iser_conn;
enum iser_task_status status;
int command_sent; /* set if command sent */
@@ -295,6 +276,7 @@ struct iscsi_iser_task {
struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */
struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/
struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */
int headers_initialized;
};
struct iser_page_vec {
@@ -346,22 +328,14 @@ void iser_rcv_completion(struct iser_rx_desc *desc,
unsigned long dto_xfer_len,
struct iser_conn *ib_conn);
void iser_snd_completion(struct iser_desc *desc);
void iser_snd_completion(struct iser_tx_desc *desc, struct iser_conn *ib_conn);
void iser_task_rdma_init(struct iscsi_iser_task *task);
void iser_task_rdma_finalize(struct iscsi_iser_task *task);
void iser_dto_buffs_release(struct iser_dto *dto);
int iser_regd_buff_release(struct iser_regd_buf *regd_buf);
void iser_free_rx_descriptors(struct iser_conn *ib_conn);
void iser_reg_single(struct iser_device *device,
struct iser_regd_buf *regd_buf,
enum dma_data_direction direction);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
@@ -381,7 +355,7 @@ void iser_unreg_mem(struct iser_mem_reg *mem_reg);
int iser_post_recvl(struct iser_conn *ib_conn);
int iser_post_recvm(struct iser_conn *ib_conn, int count);
int iser_post_send(struct iser_desc *tx_desc);
int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc);
int iser_conn_state_comp(struct iser_conn *ib_conn,
enum iser_ib_conn_state comp);
@@ -392,4 +366,6 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
enum dma_data_direction dma_dir);
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
#endif
@@ -40,62 +40,6 @@
#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
/**
* Decrements the reference count for the
* registered buffer & releases it
*
* returns 0 if released, 1 if deferred
*/
int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
{
struct ib_device *dev;
if ((atomic_read(&regd_buf->ref_count) == 0) ||
atomic_dec_and_test(&regd_buf->ref_count)) {
/* if we used the dma mr, unreg is just NOP */
if (regd_buf->reg.is_fmr)
iser_unreg_mem(&regd_buf->reg);
if (regd_buf->dma_addr) {
dev = regd_buf->device->ib_device;
ib_dma_unmap_single(dev,
regd_buf->dma_addr,
regd_buf->data_size,
regd_buf->direction);
}
/* else this regd buf is associated with task which we */
/* dma_unmap_single/sg later */
return 0;
} else {
iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
return 1;
}
}
/**
* iser_reg_single - fills registered buffer descriptor with
* registration information
*/
void iser_reg_single(struct iser_device *device,
struct iser_regd_buf *regd_buf,
enum dma_data_direction direction)
{
u64 dma_addr;
dma_addr = ib_dma_map_single(device->ib_device,
regd_buf->virt_addr,
regd_buf->data_size, direction);
BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
regd_buf->reg.lkey = device->mr->lkey;
regd_buf->reg.len = regd_buf->data_size;
regd_buf->reg.va = dma_addr;
regd_buf->reg.is_fmr = 0;
regd_buf->dma_addr = dma_addr;
regd_buf->direction = direction;
}
/**
* iser_start_rdma_unaligned_sg
*/
@@ -474,9 +418,5 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
return err;
}
}
/* take a reference on this regd buf such that it will not be released *
* (eg in send dto completion) before we get the scsi response */
atomic_inc(&regd_buf->ref_count);
return 0;
}
@@ -194,7 +194,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
init_attr.recv_cq = device->rx_cq;
init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
init_attr.cap.max_send_sge = MAX_REGD_BUF_VECTOR_LEN;
init_attr.cap.max_send_sge = 2;
init_attr.cap.max_recv_sge = 1;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr.qp_type = IB_QPT_RC;
@@ -701,86 +701,37 @@ int iser_post_recvm(struct iser_conn *ib_conn, int count)
}
/**
* iser_dto_to_iov - builds IOV from a dto descriptor
*/
static void iser_dto_to_iov(struct iser_dto *dto, struct ib_sge *iov, int iov_len)
{
int i;
struct ib_sge *sge;
struct iser_regd_buf *regd_buf;
if (dto->regd_vector_len > iov_len) {
iser_err("iov size %d too small for posting dto of len %d\n",
iov_len, dto->regd_vector_len);
BUG();
}
for (i = 0; i < dto->regd_vector_len; i++) {
sge = &iov[i];
regd_buf = dto->regd[i];
sge->addr = regd_buf->reg.va;
sge->length = regd_buf->reg.len;
sge->lkey = regd_buf->reg.lkey;
if (dto->used_sz[i] > 0) /* Adjust size */
sge->length = dto->used_sz[i];
/* offset and length should not exceed the regd buf length */
if (sge->length + dto->offset[i] > regd_buf->reg.len) {
iser_err("Used len:%ld + offset:%d, exceed reg.buf.len:"
"%ld in dto:0x%p [%d], va:0x%08lX\n",
(unsigned long)sge->length, dto->offset[i],
(unsigned long)regd_buf->reg.len, dto, i,
(unsigned long)sge->addr);
BUG();
}
sge->addr += dto->offset[i]; /* Adjust offset */
}
}
/**
* iser_start_send - Initiate a Send DTO operation
*
* returns 0 on success, -1 on failure
*/
int iser_post_send(struct iser_desc *tx_desc)
int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
int ib_ret, ret_val = 0;
int ib_ret;
struct ib_send_wr send_wr, *send_wr_failed;
struct ib_sge iov[MAX_REGD_BUF_VECTOR_LEN];
struct iser_conn *ib_conn;
struct iser_dto *dto = &tx_desc->dto;
ib_conn = dto->ib_conn;
iser_dto_to_iov(dto, iov, MAX_REGD_BUF_VECTOR_LEN);
ib_dma_sync_single_for_device(ib_conn->device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
send_wr.next = NULL;
send_wr.wr_id = (unsigned long)tx_desc;
send_wr.sg_list = iov;
send_wr.num_sge = dto->regd_vector_len;
send_wr.sg_list = tx_desc->tx_sg;
send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = dto->notify_enable ? IB_SEND_SIGNALED : 0;
send_wr.send_flags = IB_SEND_SIGNALED;
atomic_inc(&ib_conn->post_send_buf_count);
ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
if (ib_ret) {
iser_err("Failed to start SEND DTO, dto: 0x%p, IOV len: %d\n",
dto, dto->regd_vector_len);
iser_err("ib_post_send failed, ret:%d\n", ib_ret);
atomic_dec(&ib_conn->post_send_buf_count);
ret_val = -1;
}
return ret_val;
return ib_ret;
}
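Note the new contract here: iser_post_send() now takes the connection explicitly (the descriptor no longer carries a back-pointer to it) and propagates the raw ib_post_send() error instead of collapsing it to -1. A hypothetical caller might use it as follows (iser_post_send() is the function above; the wrapper is illustrative):

/* Hypothetical caller; the descriptor's SGEs must already be set up,
 * e.g. tx_sg[0] filled by iser_initialize_task_headers(). */
static int sketch_send_control(struct iser_conn *ib_conn,
			       struct iser_tx_desc *tx_desc)
{
	int err;

	err = iser_post_send(ib_conn, tx_desc);
	if (err)
		iser_err("post send failed, err:%d\n", err);
	return err;
}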
static void iser_handle_comp_error(struct iser_desc *desc,
static void iser_handle_comp_error(struct iser_tx_desc *desc,
struct iser_conn *ib_conn)
{
if (desc && desc->type == ISCSI_TX_DATAOUT)
@@ -809,16 +760,16 @@ static int iser_drain_tx_cq(struct iser_device *device)
{
struct ib_cq *cq = device->tx_cq;
struct ib_wc wc;
struct iser_desc *tx_desc;
struct iser_tx_desc *tx_desc;
struct iser_conn *ib_conn;
int completed_tx = 0;
while (ib_poll_cq(cq, 1, &wc) == 1) {
tx_desc = (struct iser_desc *) (unsigned long) wc.wr_id;
tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id;
ib_conn = wc.qp->qp_context;
if (wc.status == IB_WC_SUCCESS) {
if (wc.opcode == IB_WC_SEND)
iser_snd_completion(tx_desc);
iser_snd_completion(tx_desc, ib_conn);
else
iser_err("expected opcode %d got %d\n",
IB_WC_SEND, wc.opcode);