Commit 6700efdf authored by Bryan O'Sullivan, committed by Linus Torvalds

[PATCH] IB/ipath: fix shared receive queues for RC

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 7bbb15ea
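
The common thread in the ipath_make_rc_req() hunks below is that the QP's limit sequence number (s_lsn, the send credit limit) is only advanced when it is actually in use; a value of (u32) -1 appears to act as a sentinel for "credits disabled", as with a QP attached to a shared receive queue, and incrementing it in that case would destroy the sentinel. A minimal standalone sketch of that convention, in plain C with hypothetical names rather than the driver's own code:

/* Standalone sketch (not driver code): a credit counter whose value
 * UINT32_MAX acts as a sentinel for "credits disabled". */
#include <stdint.h>
#include <stdio.h>

#define CREDITS_DISABLED ((uint32_t) -1)

static void advance_credit_limit(uint32_t *lsn)
{
	/* Mirrors the guard added by this patch. */
	if (*lsn != CREDITS_DISABLED)
		(*lsn)++;
}

int main(void)
{
	uint32_t lsn_with_credits = 5;
	uint32_t lsn_no_credits = CREDITS_DISABLED;

	advance_credit_limit(&lsn_with_credits); /* 5 -> 6 */
	advance_credit_limit(&lsn_no_credits);   /* stays the sentinel */
	printf("%u 0x%x\n", lsn_with_credits, lsn_no_credits);
	return 0;
}

The same guarded increment is applied to the RDMA write, RDMA read, and atomic request paths in the hunks that follow.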
@@ -257,7 +257,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 			break;
 		case IB_WR_RDMA_WRITE:
-			if (newreq)
+			if (newreq && qp->s_lsn != (u32) -1)
 				qp->s_lsn++;
 			/* FALLTHROUGH */
 		case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -283,8 +283,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 			else {
 				qp->s_state =
 					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
-				/* Immediate data comes
-				 * after RETH */
+				/* Immediate data comes after RETH */
 				ohdr->u.rc.imm_data = wqe->wr.imm_data;
 				hwords += 1;
 				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -304,7 +303,8 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 			qp->s_state = OP(RDMA_READ_REQUEST);
 			hwords += sizeof(ohdr->u.rc.reth) / 4;
 			if (newreq) {
-				qp->s_lsn++;
+				if (qp->s_lsn != (u32) -1)
+					qp->s_lsn++;
 				/*
 				 * Adjust s_next_psn to count the
 				 * expected number of responses.
@@ -335,7 +335,8 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 					wqe->wr.wr.atomic.compare_add);
 			hwords += sizeof(struct ib_atomic_eth) / 4;
 			if (newreq) {
-				qp->s_lsn++;
+				if (qp->s_lsn != (u32) -1)
+					qp->s_lsn++;
 				wqe->lpsn = wqe->psn;
 			}
 			if (++qp->s_cur == qp->s_size)
@@ -552,6 +553,88 @@ static void send_rc_ack(struct ipath_qp *qp)
 	}
 }
 
+/**
+ * reset_psn - reset the QP state to send starting from PSN
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ *
+ * This is called from ipath_rc_rcv() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void reset_psn(struct ipath_qp *qp, u32 psn)
+{
+	u32 n = qp->s_last;
+	struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
+	u32 opcode;
+
+	qp->s_cur = n;
+
+	/*
+	 * If we are starting the request from the beginning,
+	 * let the normal send code handle initialization.
+	 */
+	if (ipath_cmp24(psn, wqe->psn) <= 0) {
+		qp->s_state = OP(SEND_LAST);
+		goto done;
+	}
+
+	/* Find the work request opcode corresponding to the given PSN. */
+	opcode = wqe->wr.opcode;
+	for (;;) {
+		int diff;
+
+		if (++n == qp->s_size)
+			n = 0;
+		if (n == qp->s_tail)
+			break;
+		wqe = get_swqe_ptr(qp, n);
+		diff = ipath_cmp24(psn, wqe->psn);
+		if (diff < 0)
+			break;
+		qp->s_cur = n;
+		/*
+		 * If we are starting the request from the beginning,
+		 * let the normal send code handle initialization.
+		 */
+		if (diff == 0) {
+			qp->s_state = OP(SEND_LAST);
+			goto done;
+		}
+		opcode = wqe->wr.opcode;
+	}
+
+	/*
+	 * Set the state to restart in the middle of a request.
+	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
+	 * See ipath_do_rc_send().
+	 */
+	switch (opcode) {
+	case IB_WR_SEND:
+	case IB_WR_SEND_WITH_IMM:
+		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+		break;
+	case IB_WR_RDMA_WRITE:
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+		break;
+	case IB_WR_RDMA_READ:
+		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+		break;
+	default:
+		/*
+		 * This case shouldn't happen since its only
+		 * one PSN per req.
		 */
+		qp->s_state = OP(SEND_LAST);
+	}
+done:
+	qp->s_psn = psn;
+}
+
 /**
  * ipath_restart_rc - back up requester to resend the last un-ACKed request
  * @qp: the QP to restart
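
The new reset_psn() added above positions s_cur and s_state by walking the send work queue and comparing 24-bit packet sequence numbers with ipath_cmp24(), which is defined elsewhere in the driver. A standalone sketch of a wrap-aware 24-bit comparison of this kind (the driver's exact helper may differ in detail):

/* Standalone sketch: wrap-aware comparison of 24-bit sequence numbers.
 * Shifting the 32-bit difference left by 8 and arithmetically back by 8
 * sign-extends bit 23, so the sign of the result is the circular order. */
#include <stdint.h>
#include <stdio.h>

static int cmp24(uint32_t a, uint32_t b)
{
	return ((int32_t)((a - b) << 8)) >> 8;
}

int main(void)
{
	/* 0x000002 is "after" 0xfffffe once the 24-bit space wraps. */
	printf("%d\n", cmp24(0x000002, 0xfffffe) > 0); /* prints 1 */
	printf("%d\n", cmp24(0x000100, 0x000200) < 0); /* prints 1 */
	return 0;
}

The sign of the result gives the circular ordering that the <= 0, < 0, and == 0 tests in reset_psn() rely on.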
@@ -564,7 +647,6 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 {
 	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
 	struct ipath_ibdev *dev;
-	u32 n;
 
 	/*
 	 * If there are no requests pending, we are done.
@@ -606,62 +688,7 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 	else
 		dev->n_rc_resends += (int)qp->s_psn - (int)psn;
 
-	/*
-	 * If we are starting the request from the beginning, let the normal
-	 * send code handle initialization.
-	 */
-	qp->s_cur = qp->s_last;
-	if (ipath_cmp24(psn, wqe->psn) <= 0) {
-		qp->s_state = OP(SEND_LAST);
-		qp->s_psn = wqe->psn;
-	} else {
-		n = qp->s_cur;
-		for (;;) {
-			if (++n == qp->s_size)
-				n = 0;
-			if (n == qp->s_tail) {
-				if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
-					qp->s_cur = n;
-					wqe = get_swqe_ptr(qp, n);
-				}
-				break;
-			}
-			wqe = get_swqe_ptr(qp, n);
-			if (ipath_cmp24(psn, wqe->psn) < 0)
-				break;
-			qp->s_cur = n;
-		}
-		qp->s_psn = psn;
-
-		/*
-		 * Reset the state to restart in the middle of a request.
-		 * Don't change the s_sge, s_cur_sge, or s_cur_size.
-		 * See ipath_do_rc_send().
-		 */
-		switch (wqe->wr.opcode) {
-		case IB_WR_SEND:
-		case IB_WR_SEND_WITH_IMM:
-			qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
-			break;
-		case IB_WR_RDMA_WRITE:
-		case IB_WR_RDMA_WRITE_WITH_IMM:
-			qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
-			break;
-		case IB_WR_RDMA_READ:
-			qp->s_state =
-				OP(RDMA_READ_RESPONSE_MIDDLE);
-			break;
-		default:
-			/*
-			 * This case shouldn't happen since its only
-			 * one PSN per req.
-			 */
-			qp->s_state = OP(SEND_LAST);
-		}
-	}
+	reset_psn(qp, psn);
 
 done:
 	tasklet_hi_schedule(&qp->s_task);
@@ -670,75 +697,13 @@ bail:
 	return;
 }
 
-/**
- * reset_psn - reset the QP state to send starting from PSN
- * @qp: the QP
- * @psn: the packet sequence number to restart at
- *
- * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- */
-static void reset_psn(struct ipath_qp *qp, u32 psn)
-{
-	struct ipath_swqe *wqe;
-	u32 n;
-
-	n = qp->s_cur;
-	wqe = get_swqe_ptr(qp, n);
-	for (;;) {
-		if (++n == qp->s_size)
-			n = 0;
-		if (n == qp->s_tail) {
-			if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
-				qp->s_cur = n;
-				wqe = get_swqe_ptr(qp, n);
-			}
-			break;
-		}
-		wqe = get_swqe_ptr(qp, n);
-		if (ipath_cmp24(psn, wqe->psn) < 0)
-			break;
-		qp->s_cur = n;
-	}
-	qp->s_psn = psn;
-
-	/*
-	 * Set the state to restart in the middle of a
-	 * request. Don't change the s_sge, s_cur_sge, or
-	 * s_cur_size. See ipath_do_rc_send().
-	 */
-	switch (wqe->wr.opcode) {
-	case IB_WR_SEND:
-	case IB_WR_SEND_WITH_IMM:
-		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
-		break;
-	case IB_WR_RDMA_WRITE:
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
-		break;
-	case IB_WR_RDMA_READ:
-		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
-		break;
-	default:
-		/*
-		 * This case shouldn't happen since its only
-		 * one PSN per req.
-		 */
-		qp->s_state = OP(SEND_LAST);
-	}
-}
-
 /**
  * do_rc_ack - process an incoming RC ACK
  * @qp: the QP the ACK came in on
  * @psn: the packet sequence number of the ACK
  * @opcode: the opcode of the request that resulted in the ACK
  *
- * This is called from ipath_rc_rcv() to process an incoming RC ACK
+ * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
  * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
@@ -877,22 +842,12 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 		if (qp->s_last == qp->s_tail)
 			goto bail;
 
-		/* The last valid PSN seen is the previous request's. */
-		qp->s_last_psn = wqe->psn - 1;
+		/* The last valid PSN is the previous PSN. */
+		qp->s_last_psn = psn - 1;
 
 		dev->n_rc_resends += (int)qp->s_psn - (int)psn;
 
-		/*
-		 * If we are starting the request from the beginning, let
-		 * the normal send code handle initialization.
-		 */
-		qp->s_cur = qp->s_last;
-		wqe = get_swqe_ptr(qp, qp->s_cur);
-		if (ipath_cmp24(psn, wqe->psn) <= 0) {
-			qp->s_state = OP(SEND_LAST);
-			qp->s_psn = wqe->psn;
-		} else
-			reset_psn(qp, psn);
+		reset_psn(qp, psn);
 
 		qp->s_rnr_timeout =
 			ib_ipath_rnr_table[(aeth >> IPS_AETH_CREDIT_SHIFT) &
@@ -1070,9 +1025,10 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 				      &dev->pending[dev->pending_index]);
 		spin_unlock(&dev->pending_lock);
 		/*
-		 * Update the RDMA receive state but do the copy w/o holding the
-		 * locks and blocking interrupts.  XXX Yet another place that
-		 * affects relaxed RDMA order since we don't want s_sge modified.
+		 * Update the RDMA receive state but do the copy w/o
+		 * holding the locks and blocking interrupts.
+		 * XXX Yet another place that affects relaxed RDMA order
+		 * since we don't want s_sge modified.
 		 */
 		qp->s_len -= pmtu;
 		qp->s_last_psn = psn;
@@ -1119,9 +1075,12 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 		if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
 			/*
 			 * Change the state so we contimue
-			 * processing new requests.
+			 * processing new requests and wake up the
+			 * tasklet if there are posted sends.
 			 */
 			qp->s_state = OP(SEND_LAST);
+			if (qp->s_tail != qp->s_head)
+				tasklet_hi_schedule(&qp->s_task);
 		}
 		goto ack_done;
 	}
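
The last hunk also wakes the send tasklet when the ACK completes an RDMA read and more work requests are already posted (s_tail != s_head), so the queue does not stall waiting for another event. A standalone sketch of that "kick the sender only if the ring is non-empty" check, with the tasklet modelled as a printout rather than the kernel's tasklet_hi_schedule(), and the index names chosen here for illustration:

/* Standalone sketch (not driver code): wake the sender only when the
 * software send queue still holds posted-but-unsent work requests. */
#include <stdbool.h>
#include <stdio.h>

struct send_queue {
	unsigned int head; /* index where new work requests are posted */
	unsigned int tail; /* index the send engine consumes from */
};

static bool sender_has_work(const struct send_queue *q)
{
	return q->tail != q->head; /* non-empty ring */
}

int main(void)
{
	struct send_queue q = { .head = 3, .tail = 1 };

	if (sender_has_work(&q))
		printf("schedule the send tasklet\n");
	return 0;
}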
...