Commit 618a3c03 authored by Hal Rosenstock, committed by Roland Dreier

IB/mad: RMPP support for additional classes

Add RMPP support for additional management classes that support it.
Also, validate that RMPP usage is consistent with the specified management class.
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent fa9656bb
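With this change, the rmpp_version argument to ib_register_mad_agent() must agree with the management class being registered: non-zero only for RMPP-capable classes. A minimal sketch of a registration that satisfies the new check follows; register_dm_agent and its callback parameters are hypothetical names used for illustration, while ib_register_mad_agent(), struct ib_mad_reg_req, and the IB_MGMT_* constants are the existing in-tree API.

#include <linux/bitops.h>
#include <linux/string.h>
#include <rdma/ib_mad.h>

/* Hypothetical example: register an agent for Device Management (0x06),
 * an RMPP-capable class, so rmpp_version must be non-zero
 * (IB_MGMT_RMPP_VERSION). Passing 0 here, or a non-zero version for a
 * class that does not support RMPP, now fails the consistency check. */
static struct ib_mad_agent *register_dm_agent(struct ib_device *device,
                                              u8 port_num,
                                              ib_mad_send_handler send_cb,
                                              ib_mad_recv_handler recv_cb)
{
        struct ib_mad_reg_req reg_req;

        memset(&reg_req, 0, sizeof reg_req);
        reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
        reg_req.mgmt_class_version = 1;
        set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);

        /* Returns a valid agent or an ERR_PTR() value. */
        return ib_register_mad_agent(device, port_num, IB_QPT_GSI, &reg_req,
                                     IB_MGMT_RMPP_VERSION,
                                     send_cb, recv_cb, NULL);
}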
@@ -227,6 +227,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 		if (!is_vendor_oui(mad_reg_req->oui))
 			goto error1;
 	}
+	/* Make sure class supplied is consistent with RMPP */
+	if (ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
+		if (!rmpp_version)
+			goto error1;
+	} else {
+		if (rmpp_version)
+			goto error1;
+	}
 	/* Make sure class supplied is consistent with QP type */
 	if (qp_type == IB_QPT_SMI) {
 		if ((mad_reg_req->mgmt_class !=
@@ -890,6 +898,35 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 }
 EXPORT_SYMBOL(ib_create_send_mad);

+int ib_get_mad_data_offset(u8 mgmt_class)
+{
+	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+		return IB_MGMT_SA_HDR;
+	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
+		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
+		 (mgmt_class == IB_MGMT_CLASS_BIS))
+		return IB_MGMT_DEVICE_HDR;
+	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+		return IB_MGMT_VENDOR_HDR;
+	else
+		return IB_MGMT_MAD_HDR;
+}
+EXPORT_SYMBOL(ib_get_mad_data_offset);
+
+int ib_is_mad_class_rmpp(u8 mgmt_class)
+{
+	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
+	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
+	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
+	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
+	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
+		return 1;
+	return 0;
+}
+EXPORT_SYMBOL(ib_is_mad_class_rmpp);
+
 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
 {
 	struct ib_mad_send_wr_private *mad_send_wr;
@@ -1022,6 +1059,13 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 			goto error;
 		}

+		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
+			if (mad_agent_priv->agent.rmpp_version) {
+				ret = -EINVAL;
+				goto error;
+			}
+		}
+
 		/*
 		 * Save pointer to next work request to post in case the
 		 * current one completes, and the user modifies the work
...
 /*
  * Copyright (c) 2005 Intel Inc. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -100,17 +100,6 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
 	}
 }

-static int data_offset(u8 mgmt_class)
-{
-	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-		return IB_MGMT_SA_HDR;
-	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-		return IB_MGMT_VENDOR_HDR;
-	else
-		return IB_MGMT_RMPP_HDR;
-}
-
 static void format_ack(struct ib_mad_send_buf *msg,
 		       struct ib_rmpp_mad *data,
 		       struct mad_rmpp_recv *rmpp_recv)
@@ -137,7 +126,7 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
 	struct ib_mad_send_buf *msg;
 	int ret, hdr_len;

-	hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
 	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
 				 recv_wc->wc->pkey_index, 1, hdr_len,
 				 0, GFP_KERNEL);
@@ -163,7 +152,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
 	if (IS_ERR(ah))
 		return (void *) ah;

-	hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
 	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
 				 recv_wc->wc->pkey_index, 1,
 				 hdr_len, 0, GFP_KERNEL);
@@ -408,7 +397,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
 	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

-	hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
+	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
 	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
 	pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
 	if (pad > IB_MGMT_RMPP_DATA || pad < 0)
...
@@ -177,17 +177,6 @@ static int queue_packet(struct ib_umad_file *file,
 	return ret;
 }

-static int data_offset(u8 mgmt_class)
-{
-	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-		return IB_MGMT_SA_HDR;
-	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-		return IB_MGMT_VENDOR_HDR;
-	else
-		return IB_MGMT_RMPP_HDR;
-}
-
 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *send_wc)
 {
@@ -283,7 +272,7 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
 		 */
 		return -ENOSPC;
 	}
-	offset = data_offset(recv_buf->mad->mad_hdr.mgmt_class);
+	offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
 	max_seg_payload = sizeof (struct ib_mad) - offset;

 	for (left = packet->length - seg_payload, buf += seg_payload;
@@ -441,21 +430,14 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	}

 	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
-	if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
-		hdr_len = IB_MGMT_SA_HDR;
-		copy_offset = IB_MGMT_RMPP_HDR;
-		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
-			      IB_MGMT_RMPP_FLAG_ACTIVE;
-	} else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
-		   rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
-		hdr_len = IB_MGMT_VENDOR_HDR;
-		copy_offset = IB_MGMT_RMPP_HDR;
-		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
-			      IB_MGMT_RMPP_FLAG_ACTIVE;
-	} else {
-		hdr_len = IB_MGMT_MAD_HDR;
-		copy_offset = IB_MGMT_MAD_HDR;
-		rmpp_active = 0;
+	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
+	if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
+		copy_offset = IB_MGMT_MAD_HDR;
+		rmpp_active = 0;
+	} else {
+		copy_offset = IB_MGMT_RMPP_HDR;
+		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+			      IB_MGMT_RMPP_FLAG_ACTIVE;
 	}

 	data_len = count - sizeof (struct ib_user_mad) - hdr_len;
...
@@ -3,7 +3,7 @@
  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
+ * Copyright (c) 2004-2006 Voltaire Corporation.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -55,6 +55,10 @@
 #define IB_MGMT_CLASS_DEVICE_MGMT		0x06
 #define IB_MGMT_CLASS_CM			0x07
 #define IB_MGMT_CLASS_SNMP			0x08
+#define IB_MGMT_CLASS_DEVICE_ADM		0x10
+#define IB_MGMT_CLASS_BOOT_MGMT			0x11
+#define IB_MGMT_CLASS_BIS			0x12
+#define IB_MGMT_CLASS_CONG_MGMT			0x21
 #define IB_MGMT_CLASS_VENDOR_RANGE2_START	0x30
 #define IB_MGMT_CLASS_VENDOR_RANGE2_END		0x4F
@@ -117,6 +121,8 @@ enum {
 	IB_MGMT_VENDOR_DATA = 216,
 	IB_MGMT_SA_HDR = 56,
 	IB_MGMT_SA_DATA = 200,
+	IB_MGMT_DEVICE_HDR = 64,
+	IB_MGMT_DEVICE_DATA = 192,
 };

 struct ib_mad_hdr {
@@ -602,6 +608,25 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 					    int hdr_len, int data_len,
 					    gfp_t gfp_mask);

+/**
+ * ib_is_mad_class_rmpp - returns whether given management class
+ * supports RMPP.
+ * @mgmt_class: management class
+ *
+ * This routine returns whether the management class supports RMPP.
+ */
+int ib_is_mad_class_rmpp(u8 mgmt_class);
+
+/**
+ * ib_get_mad_data_offset - returns the data offset for a given
+ * management class.
+ * @mgmt_class: management class
+ *
+ * This routine returns the data offset in the MAD for the management
+ * class requested.
+ */
+int ib_get_mad_data_offset(u8 mgmt_class);
+
 /**
  * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
  * @send_buf: Previously allocated send data buffer.
...
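Since ib_get_mad_data_offset() and ib_is_mad_class_rmpp() are now exported, a kernel consumer can size a send buffer for any supported class without open-coding the per-class header lengths. The sketch below mirrors the ack_recv()/alloc_response_msg() usage in this patch; build_reply() and its parameters are illustrative names, not part of the change.

#include <rdma/ib_mad.h>

/* Hypothetical helper: allocate a reply buffer whose header length matches
 * the class of the received MAD, enabling RMPP only when the class
 * actually supports it. */
static struct ib_mad_send_buf *build_reply(struct ib_mad_agent *agent,
                                           struct ib_mad_recv_wc *recv_wc,
                                           int data_len)
{
        u8 mgmt_class = recv_wc->recv_buf.mad->mad_hdr.mgmt_class;
        int hdr_len = ib_get_mad_data_offset(mgmt_class);
        int rmpp_active = ib_is_mad_class_rmpp(mgmt_class);

        /* Same argument order as the ib_create_send_mad() calls above. */
        return ib_create_send_mad(agent, recv_wc->wc->src_qp,
                                  recv_wc->wc->pkey_index, rmpp_active,
                                  hdr_len, data_len, GFP_KERNEL);
}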