Commit 488823f1 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/nes: Add a driver for NetEffect RNICs
  IB/mthca: Return proper error codes from mthca_fmr_alloc()
  IB: Avoid marking __devinitdata as const
  IB/mlx4: Actually print out the driver version
  IB/mthca: Pre-link receive WQEs in Tavor mode
  IB/mthca: Remove checks for srq->first_free < 0
  IB/fmr_pool: Allocate page list for pool FMRs only when caching enabled
  IB/srp: Retry stale connections
  mlx4_core: Don't read reserved fields in mlx4_QUERY_ADAPTER()
  IB/mthca: Don't read reserved fields in mthca_QUERY_ADAPTER()
  IPoIB: Remove a misleading debug print
  IPoIB: Handle bonding failover race for connected neighbours too
  IB/mthca: Fix and simplify page size calculation in mthca_reg_phys_mr()
  IB/ehca: Add PMA support
  IB/ehca: Update sma_attr also in case of disruptive config change
  IB/ehca: Prevent sending UD packets to QP0
  IB/cm: Add interim support for routed paths
  mlx4_core: Fix more section mismatches
parents 827b3f6a 3c2d774c
@@ -2681,6 +2681,16 @@ M: James.Bottomley@HansenPartnership.com
L: linux-scsi@vger.kernel.org
S: Maintained
NETEFFECT IWARP RNIC DRIVER (IW_NES)
P: Faisal Latif
M: flatif@neteffect.com
P: Glenn Streiff
M: gstreiff@neteffect.com
L: general@lists.openfabrics.org
W: http://www.neteffect.com
S: Supported
F: drivers/infiniband/hw/nes/
NETEM NETWORK EMULATOR
P: Stephen Hemminger
M: shemminger@linux-foundation.org
@@ -44,8 +44,8 @@ source "drivers/infiniband/hw/ipath/Kconfig"
source "drivers/infiniband/hw/ehca/Kconfig"
source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
@@ -5,6 +5,7 @@ obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
@@ -974,6 +974,9 @@ static void cm_format_req(struct cm_req_msg *req_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_req_param *param)
{
struct ib_sa_path_rec *pri_path = param->primary_path;
struct ib_sa_path_rec *alt_path = param->alternate_path;
cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
@@ -997,35 +1000,46 @@ static void cm_format_req(struct cm_req_msg *req_msg,
cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
cm_req_set_srq(req_msg, param->srq);
req_msg->primary_local_lid = param->primary_path->slid;
req_msg->primary_remote_lid = param->primary_path->dlid;
req_msg->primary_local_gid = param->primary_path->sgid;
req_msg->primary_remote_gid = param->primary_path->dgid;
cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
req_msg->primary_traffic_class = param->primary_path->traffic_class;
req_msg->primary_hop_limit = param->primary_path->hop_limit;
cm_req_set_primary_sl(req_msg, param->primary_path->sl);
cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
if (pri_path->hop_limit <= 1) {
req_msg->primary_local_lid = pri_path->slid;
req_msg->primary_remote_lid = pri_path->dlid;
} else {
/* Work-around until there's a way to obtain remote LID info */
req_msg->primary_local_lid = IB_LID_PERMISSIVE;
req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
}
req_msg->primary_local_gid = pri_path->sgid;
req_msg->primary_remote_gid = pri_path->dgid;
cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
req_msg->primary_traffic_class = pri_path->traffic_class;
req_msg->primary_hop_limit = pri_path->hop_limit;
cm_req_set_primary_sl(req_msg, pri_path->sl);
cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
cm_req_set_primary_local_ack_timeout(req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
param->primary_path->packet_life_time));
pri_path->packet_life_time));
if (param->alternate_path) {
req_msg->alt_local_lid = param->alternate_path->slid;
req_msg->alt_remote_lid = param->alternate_path->dlid;
req_msg->alt_local_gid = param->alternate_path->sgid;
req_msg->alt_remote_gid = param->alternate_path->dgid;
if (alt_path) {
if (alt_path->hop_limit <= 1) {
req_msg->alt_local_lid = alt_path->slid;
req_msg->alt_remote_lid = alt_path->dlid;
} else {
req_msg->alt_local_lid = IB_LID_PERMISSIVE;
req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
}
req_msg->alt_local_gid = alt_path->sgid;
req_msg->alt_remote_gid = alt_path->dgid;
cm_req_set_alt_flow_label(req_msg,
param->alternate_path->flow_label);
cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
req_msg->alt_traffic_class = param->alternate_path->traffic_class;
req_msg->alt_hop_limit = param->alternate_path->hop_limit;
cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
alt_path->flow_label);
cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
req_msg->alt_traffic_class = alt_path->traffic_class;
req_msg->alt_hop_limit = alt_path->hop_limit;
cm_req_set_alt_sl(req_msg, alt_path->sl);
cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
cm_req_set_alt_local_ack_timeout(req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
param->alternate_path->packet_life_time));
alt_path->packet_life_time));
}
if (param->private_data && param->private_data_len)
@@ -1441,6 +1455,34 @@ out:
return listen_cm_id_priv;
}
/*
* Work-around for inter-subnet connections. If the LIDs are permissive,
* we need to override the LID/SL data in the REQ with the LID information
* in the work completion.
*/
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
if (!cm_req_get_primary_subnet_local(req_msg)) {
if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
req_msg->primary_local_lid = cpu_to_be16(wc->slid);
cm_req_set_primary_sl(req_msg, wc->sl);
}
if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
}
if (!cm_req_get_alt_subnet_local(req_msg)) {
if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
req_msg->alt_local_lid = cpu_to_be16(wc->slid);
cm_req_set_alt_sl(req_msg, wc->sl);
}
if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
}
}
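Note: cm_req_handler() below calls this helper before cm_format_paths_from_req(), so the LID and SL values recovered from the work completion are the ones used to build the path records and the connection's address vector.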
static int cm_req_handler(struct cm_work *work)
{
struct ib_cm_id *cm_id;
@@ -1481,6 +1523,7 @@ static int cm_req_handler(struct cm_work *work)
cm_id_priv->id.service_id = req_msg->service_id;
cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
if (ret) {
@@ -320,10 +320,13 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
.max_maps = pool->max_remaps,
.page_shift = params->page_shift
};
int bytes_per_fmr = sizeof *fmr;
if (pool->cache_bucket)
bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);
for (i = 0; i < params->pool_size; ++i) {
fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
GFP_KERNEL);
fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
if (!fmr) {
printk(KERN_WARNING PFX "failed to allocate fmr "
"struct for FMR %d\n", i);
@@ -101,6 +101,7 @@ struct ehca_sport {
spinlock_t mod_sqp_lock;
enum ib_port_state port_state;
struct ehca_sma_attr saved_attr;
u32 pma_qp_nr;
};
#define HCA_CAP_MR_PGSIZE_4K 0x80000000
@@ -403,6 +403,8 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
sport->port_state = IB_PORT_ACTIVE;
dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
"is active");
ehca_query_sma_attr(shca, port,
&sport->saved_attr);
} else
notify_port_conf_change(shca, port);
break;
@@ -187,6 +187,11 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context);
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad,
struct ib_mad *out_mad);
void ehca_poll_eqs(unsigned long data);
int ehca_calc_ipd(struct ehca_shca *shca, int port,
@@ -472,7 +472,7 @@ int ehca_init_device(struct ehca_shca *shca)
shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
shca->ib_device.attach_mcast = ehca_attach_mcast;
shca->ib_device.detach_mcast = ehca_detach_mcast;
/* shca->ib_device.process_mad = ehca_process_mad; */
shca->ib_device.process_mad = ehca_process_mad;
shca->ib_device.mmap = ehca_mmap;
if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
@@ -209,6 +209,10 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
return -EINVAL;
}
if (unlikely(send_wr->wr.ud.remote_qpn == 0)) {
ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
return -EINVAL;
}
my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
wqe_p->u.ud_av.ud_av = my_av->av;
@@ -39,12 +39,18 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <rdma/ib_mad.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#define IB_MAD_STATUS_REDIRECT __constant_htons(0x0002)
#define IB_MAD_STATUS_UNSUP_VERSION __constant_htons(0x0004)
#define IB_MAD_STATUS_UNSUP_METHOD __constant_htons(0x0008)
#define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001)
/**
* ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
@@ -83,6 +89,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
port, ret);
return ret;
}
shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
port, pma_qp_nr);
break;
default:
ehca_err(&shca->ib_device, "invalid qp_type=%x",
@@ -109,3 +118,85 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
return H_SUCCESS;
}
struct ib_perf {
struct ib_mad_hdr mad_hdr;
u8 reserved[40];
u8 data[192];
} __attribute__ ((packed));
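(The layout adds up to a raw 256-byte IB MAD: the 24-byte common MAD header, 40 reserved bytes, and 192 bytes of performance-management attribute data.)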
static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
struct ib_perf *in_perf = (struct ib_perf *)in_mad;
struct ib_perf *out_perf = (struct ib_perf *)out_mad;
struct ib_class_port_info *poi =
(struct ib_class_port_info *)out_perf->data;
struct ehca_shca *shca =
container_of(ibdev, struct ehca_shca, ib_device);
struct ehca_sport *sport = &shca->sport[port_num - 1];
ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
*out_mad = *in_mad;
if (in_perf->mad_hdr.class_version != 1) {
ehca_warn(ibdev, "Unsupported class_version=%x",
in_perf->mad_hdr.class_version);
out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
goto perf_reply;
}
switch (in_perf->mad_hdr.method) {
case IB_MGMT_METHOD_GET:
case IB_MGMT_METHOD_SET:
/* set class port info for redirection */
out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
memset(poi, 0, sizeof(*poi));
poi->base_version = 1;
poi->class_version = 1;
poi->resp_time_value = 18;
poi->redirect_lid = sport->saved_attr.lid;
poi->redirect_qp = sport->pma_qp_nr;
poi->redirect_qkey = IB_QP1_QKEY;
poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
sport->saved_attr.lid, sport->pma_qp_nr);
break;
case IB_MGMT_METHOD_GET_RESP:
return IB_MAD_RESULT_FAILURE;
default:
out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
break;
}
perf_reply:
out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
int ret;
if (!port_num || port_num > ibdev->phys_port_cnt)
return IB_MAD_RESULT_FAILURE;
/* accept only pma request */
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
return IB_MAD_RESULT_SUCCESS;
ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
return ret;
}
@@ -52,7 +52,7 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
static const char mlx4_ib_version[] __devinitdata =
static const char mlx4_ib_version[] =
DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -468,6 +468,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
if (err)
goto out;
dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
@@ -516,9 +517,16 @@ static struct class_device_attribute *mlx4_class_attributes[] = {
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
static int mlx4_ib_version_printed;
struct mlx4_ib_dev *ibdev;
int i;
if (!mlx4_ib_version_printed) {
printk(KERN_INFO "%s", mlx4_ib_version);
++mlx4_ib_version_printed;
}
ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
if (!ibdev) {
dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
@@ -1255,9 +1255,14 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
if (err)
goto out;
MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
if (!mthca_is_memfree(dev)) {
MTHCA_GET(adapter->vendor_id, outbox,
QUERY_ADAPTER_VENDOR_ID_OFFSET);
MTHCA_GET(adapter->device_id, outbox,
QUERY_ADAPTER_DEVICE_ID_OFFSET);
MTHCA_GET(adapter->revision_id, outbox,
QUERY_ADAPTER_REVISION_ID_OFFSET);
}
MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
@@ -126,7 +126,7 @@ module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
MODULE_PARM_DESC(fmr_reserved_mtts,
"number of memory translation table segments reserved for FMR");
static const char mthca_version[] __devinitdata =
static char mthca_version[] __devinitdata =
DRV_NAME ": Mellanox InfiniBand HCA driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -735,6 +735,7 @@ static int mthca_init_hca(struct mthca_dev *mdev)
}
mdev->eq_table.inta_pin = adapter.inta_pin;
if (!mthca_is_memfree(mdev))
mdev->rev_id = adapter.revision_id;
memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);
@@ -613,8 +613,10 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
sizeof *(mr->mem.tavor.mpt) * idx;
mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
if (IS_ERR(mr->mtt))
if (IS_ERR(mr->mtt)) {
err = PTR_ERR(mr->mtt);
goto err_out_table;
}
mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
@@ -627,8 +629,10 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_out_free_mtt;
}
mpt_entry = mailbox->buf;
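For context, this is the standard kernel ERR_PTR convention: pointer-returning functions encode the errno in the pointer itself, and callers should propagate it with PTR_ERR() rather than substituting a generic error, which is what the fix above does. A minimal self-contained sketch (struct foo and both function names are hypothetical):

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int dummy; };

/* Hypothetical allocator following the ERR_PTR convention. */
static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *f = kzalloc(sizeof(*f), gfp);

	if (!f)
		return ERR_PTR(-ENOMEM);
	return f;
}

static int foo_create(void)
{
	struct foo *f = foo_alloc(GFP_KERNEL);

	if (IS_ERR(f))
		return PTR_ERR(f);	/* propagate the encoded errno */
	kfree(f);
	return 0;
}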
@@ -923,17 +923,13 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
struct mthca_mr *mr;
u64 *page_list;
u64 total_size;
u64 mask;
unsigned long mask;
int shift;
int npages;
int err;
int i, j, n;
/* First check that we have enough alignment */
if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
return ERR_PTR(-EINVAL);
mask = 0;
mask = buffer_list[0].addr ^ *iova_start;
total_size = 0;
for (i = 0; i < num_phys_buf; ++i) {
if (i != 0)
@@ -947,17 +943,7 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
if (mask & ~PAGE_MASK)
return ERR_PTR(-EINVAL);
/* Find largest page shift we can use to cover buffers */
for (shift = PAGE_SHIFT; shift < 31; ++shift)
if (num_phys_buf > 1) {
if ((1ULL << shift) & mask)
break;
} else {
if (1ULL << shift >=
buffer_list[0].size +
(buffer_list[0].addr & ((1ULL << shift) - 1)))
break;
}
shift = __ffs(mask | 1 << 31);
buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
buffer_list[0].addr &= ~0ull << shift;
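The rewrite works because any bit where the buffer's bus address and the requested iova differ, or where an intermediate buffer boundary falls, caps the usable hardware page size: the largest valid page shift is simply the lowest set bit of the accumulated mask. A worked sketch with illustrative values:

	/* Illustrative only: one buffer at bus address 0x40000, iova 0x140000. */
	unsigned long mask = 0x40000 ^ 0x140000;	/* = 0x100000 */
	int shift = __ffs(mask | 1 << 31);	/* lowest set bit -> 20, i.e. 1 MB pages */
	/* ORing in 1 << 31 bounds the shift at 31 when the mask
	 * has no lower bits set (a single, fully aligned buffer). */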
@@ -1270,6 +1256,8 @@ static int mthca_init_node_data(struct mthca_dev *dev)
goto out;
}
if (mthca_is_memfree(dev))
dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
@@ -1175,6 +1175,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
{
int ret;
int i;
struct mthca_next_seg *next;
qp->refcount = 1;
init_waitqueue_head(&qp->wait);
@@ -1217,7 +1218,6 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
}
if (mthca_is_memfree(dev)) {
struct mthca_next_seg *next;
struct mthca_data_seg *scatter;
int size = (sizeof (struct mthca_next_seg) +
qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
@@ -1240,6 +1240,13 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
qp->sq.wqe_shift) +
qp->send_wqe_offset);
}
} else {
for (i = 0; i < qp->rq.max; ++i) {
next = get_recv_wqe(qp, i);
next->nda_op = htonl((((i + 1) % qp->rq.max) <<
qp->rq.wqe_shift) | 1);
}
}
qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
@@ -1863,7 +1870,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
prev_wqe = qp->rq.last;
qp->rq.last = wqe;
((struct mthca_next_seg *) wqe)->nda_op = 0;
((struct mthca_next_seg *) wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD);
((struct mthca_next_seg *) wqe)->flags = 0;
@@ -1885,9 +1891,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qp->wrid[ind] = wr->wr_id;
((struct mthca_next_seg *) prev_wqe)->nda_op =
cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD | size);
@@ -175,9 +175,17 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
* scatter list L_Keys to the sentry value of 0x100.
*/
for (i = 0; i < srq->max; ++i) {
wqe = get_wqe(srq, i);
struct mthca_next_seg *next;
*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
next = wqe = get_wqe(srq, i);
if (i < srq->max - 1) {
*wqe_to_link(wqe) = i + 1;
next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
} else {
*wqe_to_link(wqe) = -1;
next->nda_op = 0;
}
for (scatter = wqe + sizeof (struct mthca_next_seg);
(void *) scatter < wqe + (1 << srq->wqe_shift);
@@ -470,16 +478,15 @@ out:
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
int ind;
struct mthca_next_seg *last_free;
ind = wqe_addr >> srq->wqe_shift;
spin_lock(&srq->lock);
if (likely(srq->first_free >= 0))
*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
else
srq->first_free = ind;
last_free = get_wqe(srq, srq->last_free);
*wqe_to_link(last_free) = ind;
last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
*wqe_to_link(get_wqe(srq, ind)) = -1;
srq->last_free = ind;
@@ -507,14 +514,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
for (nreq = 0; wr; wr = wr->next) {
ind = srq->first_free;
if (unlikely(ind < 0)) {
mthca_err(dev, "SRQ %06x full\n", srq->srqn);
err = -ENOMEM;
*bad_wr = wr;
break;
}
wqe = get_wqe(srq, ind);
next_ind = *wqe_to_link(wqe);
@@ -528,7 +527,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
prev_wqe = srq->last;
srq->last = wqe;
((struct mthca_next_seg *) wqe)->nda_op = 0;
((struct mthca_next_seg *) wqe)->ee_nds = 0;
/* flags field will always remain 0 */
@@ -549,9 +547,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
if (i < srq->max_gs)
mthca_set_data_seg_inval(wqe);
((struct mthca_next_seg *) prev_wqe)->nda_op =
cpu_to_be32((ind << srq->wqe_shift) | 1);
wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD);
@@ -615,14 +610,6 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
for (nreq = 0; wr; ++nreq, wr = wr->next) {
ind = srq->first_free;
if (unlikely(ind < 0)) {
mthca_err(dev, "SRQ %06x full\n", srq->srqn);
err = -ENOMEM;
*bad_wr = wr;
break;
}
wqe = get_wqe(srq, ind);
next_ind = *wqe_to_link(wqe);
@@ -633,8 +620,6 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
break;
}
((struct mthca_next_seg *) wqe)->nda_op =
cpu_to_be32((next_ind << srq->wqe_shift) | 1);
((struct mthca_next_seg *) wqe)->ee_nds = 0;
/* flags field will always remain 0 */
config INFINIBAND_NES
tristate "NetEffect RNIC Driver"
depends on PCI && INET && INFINIBAND
select LIBCRC32C
---help---
This is a low-level driver for NetEffect RDMA enabled
Network Interface Cards (RNIC).
config INFINIBAND_NES_DEBUG
bool "Verbose debugging output"
depends on INFINIBAND_NES
default n
---help---
This option causes the NetEffect RNIC driver to produce debug
messages. Select this if you are developing the driver
or trying to diagnose a problem.
obj-$(CONFIG_INFINIBAND_NES) += iw_nes.o
iw_nes-objs := nes.o nes_hw.o nes_nic.o nes_utils.o nes_verbs.o nes_cm.o
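For reference, building the new driver as a module comes down to a .config fragment like the following (LIBCRC32C is pulled in automatically by the select above):

	CONFIG_INFINIBAND_NES=m
	# CONFIG_INFINIBAND_NES_DEBUG is not set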
/*
* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef NES_CONTEXT_H
#define NES_CONTEXT_H
struct nes_qp_context {
__le32 misc;
__le32 cqs;
__le32 sq_addr_low;
__le32 sq_addr_high;
__le32 rq_addr_low;
__le32 rq_addr_high;
__le32 misc2;
__le16 tcpPorts[2];
__le32 ip0;
__le32 ip1;
__le32 ip2;
__le32 ip3;
__le32 mss;
__le32 arp_index_vlan;
__le32 tcp_state_flow_label;
__le32 pd_index_wscale;
__le32 keepalive;
u32 ts_recent;
u32 ts_age;
__le32 snd_nxt;
__le32 snd_wnd;
__le32 rcv_nxt;
__le32 rcv_wnd;
__le32 snd_max;
__le32 snd_una;
u32 srtt;
__le32 rttvar;
__le32 ssthresh;
__le32 cwnd;
__le32 snd_wl1;
__le32 snd_wl2;
__le32 max_snd_wnd;
__le32 ts_val_delta;
u32 retransmit;
u32 probe_cnt;
u32 hte_index;
__le32 q2_addr_low;
__le32 q2_addr_high;
__le32 ird_index;
u32 Rsvd3;
__le32 ird_ord_sizes;
u32 mrkr_offset;
__le32 aeq_token_low;
__le32 aeq_token_high;
};
/* QP Context Misc Field */
#define NES_QPCONTEXT_MISC_IWARP_VER_MASK 0x00000003
#define NES_QPCONTEXT_MISC_IWARP_VER_SHIFT 0
#define NES_QPCONTEXT_MISC_EFB_SIZE_MASK 0x000000C0
#define NES_QPCONTEXT_MISC_EFB_SIZE_SHIFT 6
#define NES_QPCONTEXT_MISC_RQ_SIZE_MASK 0x00000300
#define NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT 8
#define NES_QPCONTEXT_MISC_SQ_SIZE_MASK 0x00000c00
#define NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT 10
#define NES_QPCONTEXT_MISC_PCI_FCN_MASK 0x00007000
#define NES_QPCONTEXT_MISC_PCI_FCN_SHIFT 12
#define NES_QPCONTEXT_MISC_DUP_ACKS_MASK 0x00070000
#define NES_QPCONTEXT_MISC_DUP_ACKS_SHIFT 16
enum nes_qp_context_misc_bits {
NES_QPCONTEXT_MISC_RX_WQE_SIZE = 0x00000004,
NES_QPCONTEXT_MISC_IPV4 = 0x00000008,
NES_QPCONTEXT_MISC_DO_NOT_FRAG = 0x00000010,
NES_QPCONTEXT_MISC_INSERT_VLAN = 0x00000020,
NES_QPCONTEXT_MISC_DROS = 0x00008000,
NES_QPCONTEXT_MISC_WSCALE = 0x00080000,
NES_QPCONTEXT_MISC_KEEPALIVE = 0x00100000,
NES_QPCONTEXT_MISC_TIMESTAMP = 0x00200000,
NES_QPCONTEXT_MISC_SACK = 0x00400000,
NES_QPCONTEXT_MISC_RDMA_WRITE_EN = 0x00800000,
NES_QPCONTEXT_MISC_RDMA_READ_EN = 0x01000000,
NES_QPCONTEXT_MISC_WBIND_EN = 0x10000000,
NES_QPCONTEXT_MISC_FAST_REGISTER_EN = 0x20000000,
NES_QPCONTEXT_MISC_PRIV_EN = 0x40000000,
NES_QPCONTEXT_MISC_NO_NAGLE = 0x80000000
};
enum nes_qp_acc_wq_sizes {
HCONTEXT_TSA_WQ_SIZE_4 = 0,
HCONTEXT_TSA_WQ_SIZE_32 = 1,
HCONTEXT_TSA_WQ_SIZE_128 = 2,
HCONTEXT_TSA_WQ_SIZE_512 = 3
};
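These mask/shift pairs follow the usual pattern for packing little-endian hardware context words. A hedged sketch of composing the misc field from the definitions above (ctx and the chosen sizes are illustrative, not taken from the driver):

	u32 misc = 0;

	misc |= (HCONTEXT_TSA_WQ_SIZE_128 << NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT) &
		NES_QPCONTEXT_MISC_SQ_SIZE_MASK;
	misc |= (HCONTEXT_TSA_WQ_SIZE_128 << NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT) &
		NES_QPCONTEXT_MISC_RQ_SIZE_MASK;
	misc |= NES_QPCONTEXT_MISC_IPV4 | NES_QPCONTEXT_MISC_SACK;
	ctx->misc = cpu_to_le32(misc);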
/* QP Context Misc2 Fields */
#define NES_QPCONTEXT_MISC2_TTL_MASK 0x000000ff
#define NES_QPCONTEXT_MISC2_TTL_SHIFT 0
#define NES_QPCONTEXT_MISC2_HOP_LIMIT_MASK 0x000000ff
#define NES_QPCONTEXT_MISC2_HOP_LIMIT_SHIFT 0
#define NES_QPCONTEXT_MISC2_LIMIT_MASK 0x00000300
#define NES_QPCONTEXT_MISC2_LIMIT_SHIFT 8
#define NES_QPCONTEXT_MISC2_NIC_INDEX_MASK 0x0000fc00
#define NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT 10
#define NES_QPCONTEXT_MISC2_SRC_IP_MASK 0x001f0000
#define NES_QPCONTEXT_MISC2_SRC_IP_SHIFT 16
#define NES_QPCONTEXT_MISC2_TOS_MASK 0xff000000
#define NES_QPCONTEXT_MISC2_TOS_SHIFT 24
#define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_MASK 0xff000000
#define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_SHIFT 24
/* QP Context Tcp State/Flow Label Fields */
#define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_MASK 0x000fffff
#define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_SHIFT 0
#define NES_QPCONTEXT_TCPFLOW_TCP_STATE_MASK 0xf0000000
#define NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT 28
enum nes_qp_tcp_state {
NES_QPCONTEXT_TCPSTATE_CLOSED = 1,
NES_QPCONTEXT_TCPSTATE_EST = 5,
NES_QPCONTEXT_TCPSTATE_TIME_WAIT = 11,
};
/* QP Context PD Index/wscale Fields */
#define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK 0x0000000f
#define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT 0
#define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK 0x00000f00
#define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT 8
#define NES_QPCONTEXT_PDWSCALE_PDINDEX_MASK 0xffff0000
#define NES_QPCONTEXT_PDWSCALE_PDINDEX_SHIFT 16
/* QP Context Keepalive Fields */
#define NES_QPCONTEXT_KEEPALIVE_DELTA_MASK 0x0000ffff
#define NES_QPCONTEXT_KEEPALIVE_DELTA_SHIFT 0
#define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_MASK 0x00ff0000
#define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_SHIFT 16
#define NES_QPCONTEXT_KEEPALIVE_INTV_MASK 0xff000000
#define NES_QPCONTEXT_KEEPALIVE_INTV_SHIFT 24
/* QP Context ORD/IRD Fields */
#define NES_QPCONTEXT_ORDIRD_ORDSIZE_MASK 0x0000007f
#define NES_QPCONTEXT_ORDIRD_ORDSIZE_SHIFT 0
#define NES_QPCONTEXT_ORDIRD_IRDSIZE_MASK 0x00030000
#define NES_QPCONTEXT_ORDIRD_IRDSIZE_SHIFT 16
#define NES_QPCONTEXT_ORDIRD_IWARP_MODE_MASK 0x30000000
#define NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT 28
enum nes_ord_ird_bits {
NES_QPCONTEXT_ORDIRD_WRPDU = 0x02000000,
NES_QPCONTEXT_ORDIRD_LSMM_PRESENT = 0x04000000,
NES_QPCONTEXT_ORDIRD_ALSMM = 0x08000000,
NES_QPCONTEXT_ORDIRD_AAH = 0x40000000,
NES_QPCONTEXT_ORDIRD_RNMC = 0x80000000
};
enum nes_iwarp_qp_state {
NES_QPCONTEXT_IWARP_STATE_NONEXIST = 0,
NES_QPCONTEXT_IWARP_STATE_IDLE = 1,
NES_QPCONTEXT_IWARP_STATE_RTS = 2,
NES_QPCONTEXT_IWARP_STATE_CLOSING = 3,
NES_QPCONTEXT_IWARP_STATE_TERMINATE = 5,
NES_QPCONTEXT_IWARP_STATE_ERROR = 6
};
#endif /* NES_CONTEXT_H */
/*
* Copyright (c) 2006 - 2008 NetEffect. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef NES_USER_H
#define NES_USER_H
#include <linux/types.h>
#define NES_ABI_USERSPACE_VER 1
#define NES_ABI_KERNEL_VER 1
/*
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
* In particular do not use pointer types -- pass pointers in __u64
* instead.
*/
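In practice that means a userspace caller casts any buffer pointer through unsigned long into the __u64 field, so the struct layout is identical under 32-bit and 64-bit ABIs. A minimal sketch (cq_buf is a hypothetical void * from the caller):

	struct nes_create_cq_req req;

	req.user_cq_buffer = (__u64) (unsigned long) cq_buf;	/* never store a raw pointer */
	req.mcrqf = 0;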
struct nes_alloc_ucontext_req {
__u32 reserved32;
__u8 userspace_ver;
__u8 reserved8[3];
};
struct nes_alloc_ucontext_resp {
__u32 max_pds; /* maximum pds allowed for this user process */
__u32 max_qps; /* maximum qps allowed for this user process */
__u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
__u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */
__u8 kernel_ver;
__u8 reserved[2];
};
struct nes_alloc_pd_resp {
__u32 pd_id;
__u32 mmap_db_index;
};
struct nes_create_cq_req {
__u64 user_cq_buffer;
__u32 mcrqf;
__u8 reserved[4];
};
struct nes_create_qp_req {
__u64 user_wqe_buffers;
};
enum iwnes_memreg_type {
IWNES_MEMREG_TYPE_MEM = 0x0000,
IWNES_MEMREG_TYPE_QP = 0x0001,
IWNES_MEMREG_TYPE_CQ = 0x0002,
IWNES_MEMREG_TYPE_MW = 0x0003,
IWNES_MEMREG_TYPE_FMR = 0x0004,
};
struct nes_mem_reg_req {
__u32 reg_type; /* indicates if id is memory, QP or CQ */
__u32 reserved;
};
struct nes_create_cq_resp {
__u32 cq_id;
__u32 cq_size;
__u32 mmap_db_index;
__u32 reserved;
};
struct nes_create_qp_resp {
__u32 qp_id;
__u32 actual_sq_size;
__u32 actual_rq_size;
__u32 mmap_sq_db_index;
__u32 mmap_rq_db_index;
__u32 nes_drv_opt;
};
#endif /* NES_USER_H */
/*
* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef NES_VERBS_H
#define NES_VERBS_H
struct nes_device;
#define NES_MAX_USER_DB_REGIONS 4096
#define NES_MAX_USER_WQ_REGIONS 4096
struct nes_ucontext {
struct ib_ucontext ibucontext;
struct nes_device *nesdev;
unsigned long mmap_wq_offset;
unsigned long mmap_cq_offset; /* to be removed */
int index; /* rnic index (minor) */
unsigned long allocated_doorbells[BITS_TO_LONGS(NES_MAX_USER_DB_REGIONS)];
u16 mmap_db_index[NES_MAX_USER_DB_REGIONS];
u16 first_free_db;
unsigned long allocated_wqs[BITS_TO_LONGS(NES_MAX_USER_WQ_REGIONS)];
struct nes_qp *mmap_nesqp[NES_MAX_USER_WQ_REGIONS];
u16 first_free_wq;
struct list_head cq_reg_mem_list;
struct list_head qp_reg_mem_list;
u32 mcrqf;
atomic_t usecnt;
};
struct nes_pd {
struct ib_pd ibpd;
u16 pd_id;
atomic_t sqp_count;
u16 mmap_db_index;
};
struct nes_mr {
union {
struct ib_mr ibmr;
struct ib_mw ibmw;
struct ib_fmr ibfmr;
};
struct ib_umem *region;
u16 pbls_used;
u8 mode;
u8 pbl_4k;
};
struct nes_hw_pb {
__le32 pa_low;
__le32 pa_high;
};
struct nes_vpbl {
dma_addr_t pbl_pbase;
struct nes_hw_pb *pbl_vbase;
};
struct nes_root_vpbl {
dma_addr_t pbl_pbase;
struct nes_hw_pb *pbl_vbase;
struct nes_vpbl *leaf_vpbl;
};
struct nes_fmr {
struct nes_mr nesmr;
u32 leaf_pbl_cnt;
struct nes_root_vpbl root_vpbl;
struct ib_qp *ib_qp;
int access_rights;
struct ib_fmr_attr attr;
};
struct nes_av;
struct nes_cq {
struct ib_cq ibcq;
struct nes_hw_cq hw_cq;
u32 polled_completions;
u32 cq_mem_size;
spinlock_t lock;
u8 virtual_cq;
u8 pad[3];
};
struct nes_wq {
spinlock_t lock;
};
struct iw_cm_id;
struct ietf_mpa_frame;
struct nes_qp {
struct ib_qp ibqp;
void *allocated_buffer;
struct iw_cm_id *cm_id;
struct workqueue_struct *wq;
struct work_struct disconn_work;
struct nes_cq *nesscq;
struct nes_cq *nesrcq;
struct nes_pd *nespd;
void *cm_node; /* handle of the node this QP is associated with */
struct ietf_mpa_frame *ietf_frame;
dma_addr_t ietf_frame_pbase;
wait_queue_head_t state_waitq;
unsigned long socket;
struct nes_hw_qp hwqp;
struct work_struct work;
struct work_struct ae_work;
enum ib_qp_state ibqp_state;
u32 iwarp_state;
u32 hte_index;
u32 last_aeq;
u32 qp_mem_size;
atomic_t refcount;
atomic_t close_timer_started;
u32 mmap_sq_db_index;
u32 mmap_rq_db_index;
spinlock_t lock;
struct nes_qp_context *nesqp_context;
dma_addr_t nesqp_context_pbase;
void *pbl_vbase;
dma_addr_t pbl_pbase;
struct page *page;
wait_queue_head_t kick_waitq;
u16 in_disconnect;
u16 private_data_len;
u8 active_conn;
u8 skip_lsmm;
u8 user_mode;
u8 hte_added;
u8 hw_iwarp_state;
u8 flush_issued;
u8 hw_tcp_state;
u8 disconn_pending;
u8 destroyed;
};
#endif /* NES_VERBS_H */
@@ -680,12 +680,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
neigh = *to_ipoib_neigh(skb->dst->neighbour);
if (ipoib_cm_get(neigh)) {
if (ipoib_cm_up(neigh)) {
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
goto out;
}
} else if (neigh->ah) {
if (neigh->ah)
if (unlikely((memcmp(&neigh->dgid.raw,
skb->dst->neighbour->ha + 4,
sizeof(union ib_gid))) ||
@@ -706,6 +701,12 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
if (ipoib_cm_get(neigh)) {
if (ipoib_cm_up(neigh)) {
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
goto out;
}
} else if (neigh->ah) {
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
goto out;
}
@@ -813,11 +814,9 @@ static void ipoib_neigh_cleanup(struct neighbour *n)
struct ipoib_ah *ah = NULL;
neigh = *to_ipoib_neigh(n);
if (neigh) {
if (neigh)
priv = netdev_priv(neigh->dev);
ipoib_dbg(priv, "neigh_destructor for bonding device: %s\n",
n->dev->name);
} else
else
return;
ipoib_dbg(priv,
"neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
@@ -204,6 +204,22 @@ out:
return ret;
}
static int srp_new_cm_id(struct srp_target_port *target)
{
struct ib_cm_id *new_cm_id;
new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
srp_cm_handler, target);
if (IS_ERR(new_cm_id))
return PTR_ERR(new_cm_id);
if (target->cm_id)
ib_destroy_cm_id(target->cm_id);
target->cm_id = new_cm_id;
return 0;
}
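Note the ordering in this helper: the replacement CM ID is created before the old one is destroyed, so target->cm_id never points at freed state, even if ib_create_cm_id() fails.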
static int srp_create_target_ib(struct srp_target_port *target)
{
struct ib_qp_init_attr *init_attr;
@@ -436,6 +452,7 @@ static void srp_remove_work(struct work_struct *work)
static int srp_connect_target(struct srp_target_port *target)
{
int retries = 3;
int ret;
ret = srp_lookup_path(target);
@@ -468,6 +485,21 @@ static int srp_connect_target(struct srp_target_port *target)
case SRP_DLID_REDIRECT:
break;
case SRP_STALE_CONN:
/* Our current CM id was stale, and is now in timewait.
* Try to reconnect with a new one.
*/
if (!retries-- || srp_new_cm_id(target)) {
shost_printk(KERN_ERR, target->scsi_host, PFX
"giving up on stale connection\n");
target->status = -ECONNRESET;
return target->status;
}
shost_printk(KERN_ERR, target->scsi_host, PFX
"retrying stale connection\n");
break;
default:
return target->status;
}
@@ -507,7 +539,6 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re
static int srp_reconnect_target(struct srp_target_port *target)
{
struct ib_cm_id *new_cm_id;
struct ib_qp_attr qp_attr;
struct srp_request *req, *tmp;
struct ib_wc wc;
@@ -526,14 +557,9 @@ static int srp_reconnect_target(struct srp_target_port *target)
* Now get a new local CM ID so that we avoid confusing the
* target in case things are really fouled up.
*/
new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
srp_cm_handler, target);
if (IS_ERR(new_cm_id)) {
ret = PTR_ERR(new_cm_id);
ret = srp_new_cm_id(target);
if (ret)
goto err;
}
ib_destroy_cm_id(target->cm_id);
target->cm_id = new_cm_id;
qp_attr.qp_state = IB_QPS_RESET;
ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
@@ -1171,6 +1197,11 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
target->status = -ECONNRESET;
break;
case IB_CM_REJ_STALE_CONN:
shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
target->status = SRP_STALE_CONN;
break;
default:
shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
event->param.rej_rcvd.reason);
@@ -1862,11 +1893,9 @@ static ssize_t srp_create_target(struct class_device *class_dev,
if (ret)
goto err;
target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
if (IS_ERR(target->cm_id)) {
ret = PTR_ERR(target->cm_id);
ret = srp_new_cm_id(target);
if (ret)
goto err_free;
}
target->qp_in_error = 0;
ret = srp_connect_target(target);
@@ -54,6 +54,7 @@ enum {
SRP_PORT_REDIRECT = 1,
SRP_DLID_REDIRECT = 2,
SRP_STALE_CONN = 3,
SRP_MAX_LUN = 512,
SRP_DEF_SG_TABLESIZE = 12,
@@ -617,9 +617,6 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
int err;
#define QUERY_ADAPTER_OUT_SIZE 0x100
#define QUERY_ADAPTER_VENDOR_ID_OFFSET 0x00
#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04
#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
#define QUERY_ADAPTER_VSD_OFFSET 0x20
@@ -633,9 +630,6 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
if (err)
goto out;
MLX4_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
MLX4_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
MLX4_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
@@ -99,9 +99,6 @@ struct mlx4_dev_cap {
};
struct mlx4_adapter {
u32 vendor_id;
u32 device_id;
u32 revision_id;
char board_id[MLX4_BOARD_ID_LEN];
u8 inta_pin;
};
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#endif /* CONFIG_PCI_MSI */
static const char mlx4_version[] __devinitdata =
static char mlx4_version[] __devinitdata =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -163,7 +163,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
static int __devinit mlx4_load_fw(struct mlx4_dev *dev)
static int mlx4_load_fw(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
@@ -197,7 +197,7 @@ err_free:
return err;
}
static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
int cmpt_entry_sz)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -534,7 +534,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
}
priv->eq_table.inta_pin = adapter.inta_pin;
dev->rev_id = adapter.revision_id;
memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
return 0;
@@ -688,7 +687,7 @@ err_uar_table_free:
return err;
}
static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev)
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry entries[MLX4_NUM_EQ];
@@ -122,7 +122,7 @@ static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
spin_unlock(&buddy->lock);
}
static int __devinit mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
int i, s;