Commit fe9e08e1 authored by Sean Hefty, committed by Roland Dreier

[PATCH] IB: Add handling for ABORT and STOP RMPP MADs.

Add handling for ABORT / STOP RMPP MADs: validate the RMPP version, type, status, segment number, and window of received RMPP MADs; send an ABORT MAD (nack_recv()) when validation fails or when reassembly of a receive times out; and terminate the matching in-progress send (abort_send()) when an ABORT or STOP is received. The data_offset(), format_ack(), and ack_recv() helpers move earlier in the file so the new nack_recv() routine can share them, and IB_MGMT_RMPP_STATUS_ABORT_MIN/ABORT_MAX defines are added to bound the valid ABORT status range.
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent b9ef520f
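The core of the receive-side change is status validation: a STOP MAD is only accepted when it carries the "resources exhausted" status (IB_MGMT_RMPP_STATUS_RESX), an ABORT MAD must carry a status inside the new ABORT_MIN..ABORT_MAX range, and anything else is answered with an ABORT of its own via abort_send()/nack_recv(). The short userspace sketch below is illustrative only and is not part of the patch; the #define values are copied from the ib_mad.h hunk at the end of the diff, and the helper names stop_status_ok()/abort_status_ok() are invented here for clarity.

/*
 * Illustrative sketch (not patch code): models the status checks made by
 * the new process_rmpp_stop() and process_rmpp_abort() handlers.
 */
#include <stdio.h>

#define IB_MGMT_RMPP_STATUS_RESX	1	/* STOP: resources exhausted */
#define IB_MGMT_RMPP_STATUS_ABORT_MIN	118	/* lowest valid ABORT status */
#define IB_MGMT_RMPP_STATUS_ABORT_MAX	127	/* highest valid ABORT status */

/* A STOP MAD is only valid with the "resources exhausted" status. */
static int stop_status_ok(unsigned int status)
{
	return status == IB_MGMT_RMPP_STATUS_RESX;
}

/* An ABORT MAD must carry a status inside the 118..127 abort range. */
static int abort_status_ok(unsigned int status)
{
	return status >= IB_MGMT_RMPP_STATUS_ABORT_MIN &&
	       status <= IB_MGMT_RMPP_STATUS_ABORT_MAX;
}

int main(void)
{
	/* Any other status is treated as IB_MGMT_RMPP_STATUS_BAD_STATUS by the patch. */
	printf("STOP  status 1:   %s\n", stop_status_ok(1) ? "accepted" : "bad status");
	printf("STOP  status 118: %s\n", stop_status_ok(118) ? "accepted" : "bad status");
	printf("ABORT status 117: %s\n", abort_status_ok(117) ? "accepted" : "bad status");
	printf("ABORT status 120: %s\n", abort_status_ok(120) ? "accepted" : "bad status");
	return 0;
}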
@@ -100,6 +100,121 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
}
}
static int data_offset(u8 mgmt_class)
{
if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
return offsetof(struct ib_sa_mad, data);
else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
(mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
return offsetof(struct ib_vendor_mad, data);
else
return offsetof(struct ib_rmpp_mad, data);
}
static void format_ack(struct ib_rmpp_mad *ack,
struct ib_rmpp_mad *data,
struct mad_rmpp_recv *rmpp_recv)
{
unsigned long flags;
memcpy(&ack->mad_hdr, &data->mad_hdr,
data_offset(data->mad_hdr.mgmt_class));
ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
spin_lock_irqsave(&rmpp_recv->lock, flags);
rmpp_recv->last_ack = rmpp_recv->seg_num;
ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}
static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
struct ib_mad_recv_wc *recv_wc)
{
struct ib_mad_send_buf *msg;
struct ib_send_wr *bad_send_wr;
int hdr_len, ret;
hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
GFP_KERNEL);
if (!msg)
return;
format_ack((struct ib_rmpp_mad *) msg->mad,
(struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
&bad_send_wr);
if (ret)
ib_free_send_mad(msg);
}
static int alloc_response_msg(struct ib_mad_agent *agent,
struct ib_mad_recv_wc *recv_wc,
struct ib_mad_send_buf **msg)
{
struct ib_mad_send_buf *m;
struct ib_ah *ah;
int hdr_len;
ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
recv_wc->recv_buf.grh, agent->port_num);
if (IS_ERR(ah))
return PTR_ERR(ah);
hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
recv_wc->wc->pkey_index, ah, 1, hdr_len,
sizeof(struct ib_rmpp_mad) - hdr_len,
GFP_KERNEL);
if (IS_ERR(m)) {
ib_destroy_ah(ah);
return PTR_ERR(m);
}
*msg = m;
return 0;
}
static void free_msg(struct ib_mad_send_buf *msg)
{
ib_destroy_ah(msg->send_wr.wr.ud.ah);
ib_free_send_mad(msg);
}
static void nack_recv(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
struct ib_mad_send_buf *msg;
struct ib_rmpp_mad *rmpp_mad;
struct ib_send_wr *bad_send_wr;
int ret;
ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
if (ret)
return;
rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
memcpy(rmpp_mad, recv_wc->recv_buf.mad,
data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));
rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
rmpp_mad->rmpp_hdr.seg_num = 0;
rmpp_mad->rmpp_hdr.paylen_newwin = 0;
ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
if (ret)
free_msg(msg);
}
static void recv_timeout_handler(void *data)
{
struct mad_rmpp_recv *rmpp_recv = data;
@@ -115,8 +230,8 @@ static void recv_timeout_handler(void *data)
list_del(&rmpp_recv->list);
spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
rmpp_wc = rmpp_recv->rmpp_wc;
nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
destroy_rmpp_recv(rmpp_recv);
ib_free_recv_mad(rmpp_wc);
}
@@ -230,60 +345,6 @@ insert_rmpp_recv(struct ib_mad_agent_private *agent,
return cur_rmpp_recv;
}
static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
struct ib_rmpp_mad *rmpp_mad;
@@ -559,6 +620,34 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
return ib_send_mad(mad_send_wr);
}
static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
u8 rmpp_status)
{
struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc wc;
unsigned long flags;
spin_lock_irqsave(&agent->lock, flags);
mad_send_wr = ib_find_send_mad(agent, tid);
if (!mad_send_wr)
goto out; /* Unmatched send */
if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
(!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
goto out; /* Send is already done */
ib_mark_mad_done(mad_send_wr);
spin_unlock_irqrestore(&agent->lock, flags);
wc.status = IB_WC_REM_ABORT_ERR;
wc.vendor_err = rmpp_status;
wc.wr_id = mad_send_wr->wr_id;
ib_mad_complete_send_wr(mad_send_wr, &wc);
return;
out:
spin_unlock_irqrestore(&agent->lock, flags);
}
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
@@ -568,11 +657,21 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
int seg_num, newwin, ret;
rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
if (rmpp_mad->rmpp_hdr.rmpp_status) {
abort_send(agent, rmpp_mad->mad_hdr.tid,
IB_MGMT_RMPP_STATUS_BAD_STATUS);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
return;
}
seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
if (newwin < seg_num) {
abort_send(agent, rmpp_mad->mad_hdr.tid,
IB_MGMT_RMPP_STATUS_W2S);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
return;
}
spin_lock_irqsave(&agent->lock, flags);
mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
@@ -583,8 +682,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
(!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
goto out; /* Send is already done */
if (seg_num > mad_send_wr->total_seg || seg_num > mad_send_wr->newwin) {
spin_unlock_irqrestore(&agent->lock, flags);
abort_send(agent, rmpp_mad->mad_hdr.tid,
IB_MGMT_RMPP_STATUS_S2B);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
return;
}
if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
goto out; /* Old ACK */
@@ -628,6 +732,72 @@ out:
spin_unlock_irqrestore(&agent->lock, flags);
}
static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct ib_rmpp_hdr *rmpp_hdr;
u8 rmpp_status;
rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;
if (rmpp_hdr->rmpp_status) {
rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
goto bad;
}
if (rmpp_hdr->seg_num == __constant_htonl(1)) {
if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
goto bad;
}
return start_rmpp(agent, mad_recv_wc);
} else {
if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
goto bad;
}
return continue_rmpp(agent, mad_recv_wc);
}
bad:
nack_recv(agent, mad_recv_wc, rmpp_status);
ib_free_recv_mad(mad_recv_wc);
return NULL;
}
static void process_rmpp_stop(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct ib_rmpp_mad *rmpp_mad;
rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
abort_send(agent, rmpp_mad->mad_hdr.tid,
IB_MGMT_RMPP_STATUS_BAD_STATUS);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
} else
abort_send(agent, rmpp_mad->mad_hdr.tid,
rmpp_mad->rmpp_hdr.rmpp_status);
}
static void process_rmpp_abort(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct ib_rmpp_mad *rmpp_mad;
rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
abort_send(agent, rmpp_mad->mad_hdr.tid,
IB_MGMT_RMPP_STATUS_BAD_STATUS);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
} else
abort_send(agent, rmpp_mad->mad_hdr.tid,
rmpp_mad->rmpp_hdr.rmpp_status);
}
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc)
@@ -638,23 +808,29 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
return mad_recv_wc;
if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
abort_send(agent, rmpp_mad->mad_hdr.tid,
IB_MGMT_RMPP_STATUS_UNV);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
goto out;
}
switch (rmpp_mad->rmpp_hdr.rmpp_type) {
case IB_MGMT_RMPP_TYPE_DATA:
return process_rmpp_data(agent, mad_recv_wc);
case IB_MGMT_RMPP_TYPE_ACK:
process_rmpp_ack(agent, mad_recv_wc);
break;
case IB_MGMT_RMPP_TYPE_STOP:
process_rmpp_stop(agent, mad_recv_wc);
break;
case IB_MGMT_RMPP_TYPE_ABORT:
process_rmpp_abort(agent, mad_recv_wc);
break;
default:
abort_send(agent, rmpp_mad->mad_hdr.tid,
IB_MGMT_RMPP_STATUS_BADT);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
break;
}
out:
@@ -714,7 +890,10 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
msg = (struct ib_mad_send_buf *) (unsigned long)
mad_send_wc->wr_id;
if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
ib_free_send_mad(msg);
else
free_msg(msg);
return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
}
...
@@ -90,6 +90,7 @@
#define IB_MGMT_RMPP_STATUS_SUCCESS 0
#define IB_MGMT_RMPP_STATUS_RESX 1
#define IB_MGMT_RMPP_STATUS_ABORT_MIN 118
#define IB_MGMT_RMPP_STATUS_T2L 118
#define IB_MGMT_RMPP_STATUS_BAD_LEN 119
#define IB_MGMT_RMPP_STATUS_BAD_SEG 120
@@ -100,6 +101,7 @@
#define IB_MGMT_RMPP_STATUS_UNV 125
#define IB_MGMT_RMPP_STATUS_TMR 126
#define IB_MGMT_RMPP_STATUS_UNSPEC 127
#define IB_MGMT_RMPP_STATUS_ABORT_MAX 127
#define IB_QP0 0
#define IB_QP1 __constant_htonl(1)
...