Commit 5367f2d6 authored by Linus Torvalds
parents 64ca9004 4f8448df
@@ -308,10 +308,11 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
 {
 	unsigned long flags;
 	int ret;
+	static int next_id;
 	do {
 		spin_lock_irqsave(&cm.lock, flags);
-		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
+		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++,
 					(__force int *) &cm_id_priv->id.local_id);
 		spin_unlock_irqrestore(&cm.lock, flags);
 	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
@@ -684,6 +685,13 @@ retest:
 		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
 		break;
 	case IB_CM_REQ_SENT:
+		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
+			       &cm_id_priv->av.port->cm_dev->ca_guid,
+			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
+			       NULL, 0);
+		break;
 	case IB_CM_MRA_REQ_RCVD:
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
@@ -694,10 +702,8 @@ retest:
 	case IB_CM_REP_RCVD:
 	case IB_CM_MRA_REP_SENT:
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
-			       &cm_id_priv->av.port->cm_dev->ca_guid,
-			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
-			       NULL, 0);
+		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
+			       NULL, 0, NULL, 0);
 		break;
 	case IB_CM_ESTABLISHED:
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
...
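The cm_alloc_id() hunk replaces the constant starting hint with a static next_id that advances on every allocation, so a local CM ID that was just released is not handed straight back out to a new connection. A minimal userspace sketch of that allocation policy (the alloc_id() helper and table size are made up for illustration):

```c
#include <stdio.h>
#include <stdbool.h>

#define MAX_IDS 8

static bool in_use[MAX_IDS];
static int next_id;            /* advancing hint, as in the patch */

/* Allocate the lowest free ID at or above 'hint' (wrapping once). */
static int alloc_id(int hint)
{
	for (int n = 0; n < MAX_IDS; ++n) {
		int id = (hint + n) % MAX_IDS;
		if (!in_use[id]) {
			in_use[id] = true;
			return id;
		}
	}
	return -1;
}

int main(void)
{
	int a = alloc_id(next_id++);   /* 0 */
	int b = alloc_id(next_id++);   /* 1 */
	in_use[a] = false;             /* free the first ID */

	/* With a constant hint of 0 this would hand out 0 again right away;
	 * the advancing hint gives out 2 instead, delaying reuse. */
	int c = alloc_id(next_id++);
	printf("a=%d b=%d c=%d\n", a, b, c);
	return 0;
}
```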
@@ -197,8 +197,8 @@ static void send_handler(struct ib_mad_agent *agent,
 		memcpy(timeout->mad.data, packet->mad.data,
 		       sizeof (struct ib_mad_hdr));
-		if (!queue_packet(file, agent, timeout))
-			return;
+		if (queue_packet(file, agent, timeout))
+			kfree(timeout);
 	}
 out:
 	kfree(packet);
...
@@ -489,6 +489,7 @@ err_idr:
 err_unreg:
 	ib_dereg_mr(mr);
+	atomic_dec(&pd->usecnt);
 err_up:
 	up(&ib_uverbs_idr_mutex);
@@ -593,13 +594,18 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	if (cmd.comp_vector >= file->device->num_comp_vectors)
 		return -EINVAL;
-	if (cmd.comp_channel >= 0)
-		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
 	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
 	if (!uobj)
 		return -ENOMEM;
+	if (cmd.comp_channel >= 0) {
+		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
+		if (!ev_file) {
+			ret = -EINVAL;
+			goto err;
+		}
+	}
 	uobj->uobject.user_handle = cmd.user_handle;
 	uobj->uobject.context = file->ucontext;
 	uobj->uverbs_file = file;
@@ -663,6 +669,8 @@ err_up:
 	ib_destroy_cq(cq);
 err:
+	if (ev_file)
+		ib_uverbs_release_ucq(file, ev_file, uobj);
 	kfree(uobj);
 	return ret;
 }
@@ -935,6 +943,11 @@ err_idr:
 err_destroy:
 	ib_destroy_qp(qp);
+	atomic_dec(&pd->usecnt);
+	atomic_dec(&attr.send_cq->usecnt);
+	atomic_dec(&attr.recv_cq->usecnt);
+	if (attr.srq)
+		atomic_dec(&attr.srq->usecnt);
 err_up:
 	up(&ib_uverbs_idr_mutex);
@@ -1448,6 +1461,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	attr.sl = cmd.attr.sl;
 	attr.src_path_bits = cmd.attr.src_path_bits;
 	attr.static_rate = cmd.attr.static_rate;
+	attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
 	attr.port_num = cmd.attr.port_num;
 	attr.grh.flow_label = cmd.attr.grh.flow_label;
 	attr.grh.sgid_index = cmd.attr.grh.sgid_index;
@@ -1729,6 +1743,7 @@ err_idr:
 err_destroy:
 	ib_destroy_srq(srq);
+	atomic_dec(&pd->usecnt);
 err_up:
 	up(&ib_uverbs_idr_mutex);
...
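The uverbs hunks share one theme: the error paths now undo side effects taken earlier in the call, dropping the usecnt references held on the PD, CQs and SRQ, releasing the completion channel that create_cq looked up, and failing cleanly when that lookup returns nothing. A tiny userspace sketch of the reference-count rollback idiom these labels implement (structure and names hypothetical):

```c
#include <stdio.h>

/* Hypothetical object with a simple reference count. */
struct pd { int usecnt; };

static int create_qp(struct pd *pd, int fail_late)
{
	int ret;

	pd->usecnt++;                 /* reference taken during creation */

	if (fail_late) {              /* e.g. a later copy-out or idr insert fails */
		ret = -1;
		goto err_destroy;
	}
	return 0;

err_destroy:
	pd->usecnt--;                 /* the rollback these hunks add */
	return ret;
}

int main(void)
{
	struct pd pd = { 0 };

	create_qp(&pd, 1);
	printf("pd.usecnt after failed create: %d (should be 0)\n", pd.usecnt);
	return 0;
}
```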
@@ -107,9 +107,9 @@ struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
 	if (wc->wc_flags & IB_WC_GRH) {
 		ah_attr.ah_flags = IB_AH_GRH;
-		ah_attr.grh.dgid = grh->dgid;
-		ret = ib_find_cached_gid(pd->device, &grh->sgid, &port_num,
+		ah_attr.grh.dgid = grh->sgid;
+		ret = ib_find_cached_gid(pd->device, &grh->dgid, &port_num,
 					 &gid_index);
 		if (ret)
 			return ERR_PTR(ret);
...
@@ -937,10 +937,6 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 	if (err)
 		goto out;
-	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
-	dev_lim->max_srq_sz = (1 << field) - 1;
-	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
-	dev_lim->max_qp_sz = (1 << field) - 1;
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
 	dev_lim->reserved_qps = 1 << (field & 0xf);
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
@@ -1056,6 +1052,10 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 	mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
 	if (mthca_is_memfree(dev)) {
+		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
+		dev_lim->max_srq_sz = 1 << field;
+		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
+		dev_lim->max_qp_sz = 1 << field;
 		MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET);
 		dev_lim->hca.arbel.resize_srq = field & 1;
 		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
@@ -1087,6 +1087,10 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 		mthca_dbg(dev, "Max ICM size %lld MB\n",
 			  (unsigned long long) dev_lim->hca.arbel.max_icm_sz >> 20);
 	} else {
+		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
+		dev_lim->max_srq_sz = (1 << field) - 1;
+		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
+		dev_lim->max_qp_sz = (1 << field) - 1;
 		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET);
 		dev_lim->hca.tavor.max_avs = 1 << (field & 0x3f);
 		dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE;
...
@@ -128,12 +128,12 @@ struct mthca_err_cqe {
 	__be32 my_qpn;
 	u32 reserved1[3];
 	u8 syndrome;
-	u8 reserved2;
+	u8 vendor_err;
 	__be16 db_cnt;
-	u32 reserved3;
+	u32 reserved2;
 	__be32 wqe;
 	u8 opcode;
-	u8 reserved4[2];
+	u8 reserved3[2];
 	u8 owner;
 };
@@ -253,6 +253,15 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 	wake_up(&cq->wait);
 }
+static inline int is_recv_cqe(struct mthca_cqe *cqe)
+{
+	if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
+	    MTHCA_ERROR_CQE_OPCODE_MASK)
+		return !(cqe->opcode & 0x01);
+	else
+		return !(cqe->is_send & 0x80);
+}
 void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 		    struct mthca_srq *srq)
 {
@@ -296,7 +305,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 	while ((int) --prod_index - (int) cq->cons_index >= 0) {
 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
 		if (cqe->my_qpn == cpu_to_be32(qpn)) {
-			if (srq)
+			if (srq && is_recv_cqe(cqe))
 				mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
 			++nfreed;
 		} else if (nfreed)
@@ -333,8 +342,8 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
 	}
 	/*
-	 * For completions in error, only work request ID, status (and
-	 * freed resource count for RD) have to be set.
+	 * For completions in error, only work request ID, status, vendor error
+	 * (and freed resource count for RD) have to be set.
 	 */
 	switch (cqe->syndrome) {
 	case SYNDROME_LOCAL_LENGTH_ERR:
@@ -396,6 +405,8 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
 		break;
 	}
+	entry->vendor_err = cqe->vendor_err;
 	/*
 	 * Mem-free HCAs always generate one CQE per WQE, even in the
 	 * error case, so we don't have to check the doorbell count, etc.
...
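The new is_recv_cqe() helper lets mthca_cq_clean() free SRQ WQEs only for receive completions, presumably because a send CQE's wqe field does not refer to an SRQ entry. The standalone sketch below mirrors the helper's bit tests; the 0xfe mask value and field layout are assumptions for illustration:

```c
#include <stdio.h>
#include <stdint.h>

/* Standalone mirror of the is_recv_cqe() helper added above. */
#define ERROR_CQE_OPCODE_MASK 0xfe   /* assumed value */

struct cqe {
	uint8_t opcode;
	uint8_t is_send;
};

static int is_recv_cqe(const struct cqe *cqe)
{
	if ((cqe->opcode & ERROR_CQE_OPCODE_MASK) == ERROR_CQE_OPCODE_MASK)
		/* Error CQE: the low opcode bit encodes the direction. */
		return !(cqe->opcode & 0x01);
	/* Normal CQE: the top bit of is_send does. */
	return !(cqe->is_send & 0x80);
}

int main(void)
{
	struct cqe ok_recv  = { .opcode = 0x02, .is_send = 0x00 };
	struct cqe ok_send  = { .opcode = 0x08, .is_send = 0x80 };
	struct cqe err_recv = { .opcode = 0xfe, .is_send = 0x00 };

	printf("recv? %d %d %d\n", is_recv_cqe(&ok_recv),
	       is_recv_cqe(&ok_send), is_recv_cqe(&err_recv));
	return 0;
}
```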
@@ -484,8 +484,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 			 u8 intr,
 			 struct mthca_eq *eq)
 {
-	int npages = (nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
-		PAGE_SIZE;
+	int npages;
 	u64 *dma_list = NULL;
 	dma_addr_t t;
 	struct mthca_mailbox *mailbox;
@@ -496,6 +495,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	eq->dev = dev;
 	eq->nent = roundup_pow_of_two(max(nent, 2));
+	npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
 	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
 				GFP_KERNEL);
...
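The EQ fix defers the npages computation until after nent has been rounded up (to a power of two, and to at least 2), so the page list is sized for the queue that is actually allocated rather than the caller's request. A small sketch of the arithmetic, assuming 4 KiB pages and a 32-byte EQ entry:

```c
#include <stdio.h>

#define PAGE_SIZE     4096
#define EQ_ENTRY_SIZE 32          /* assumed entry size for illustration */

#define ALIGN(x, a)   (((x) + (a) - 1) / (a) * (a))

static unsigned roundup_pow_of_two(unsigned x)
{
	unsigned r = 1;
	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned nent = 520;   /* caller-requested number of entries */

	/* Old: pages for the requested size only. */
	unsigned old_npages = (nent * EQ_ENTRY_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;

	/* New: round the queue up first, then size the page list. */
	unsigned real_nent = roundup_pow_of_two(nent > 2 ? nent : 2);
	unsigned new_npages = ALIGN(real_nent * EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;

	printf("nent=%u -> rounded=%u, old npages=%u, new npages=%u\n",
	       nent, real_nent, old_npages, new_npages);
	return 0;
}
```

For nent=520 the old formula sizes the page list for 5 pages while the rounded-up queue actually needs 8, which is the undersizing the patch removes.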
@@ -261,6 +261,10 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
 	}
 	err = mthca_dev_lim(mdev, &dev_lim);
+	if (err) {
+		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+		goto err_disable;
+	}
 	profile = default_profile;
 	profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
...
@@ -111,7 +111,8 @@ static int find_mgm(struct mthca_dev *dev,
 			goto out;
 		if (status) {
 			mthca_err(dev, "READ_MGM returned status %02x\n", status);
-			return -EINVAL;
+			err = -EINVAL;
+			goto out;
 		}
 		if (!memcmp(mgm->gid, zero_gid, 16)) {
@@ -126,7 +127,7 @@ static int find_mgm(struct mthca_dev *dev,
 			goto out;
 		*prev = *index;
-		*index = be32_to_cpu(mgm->next_gid_index) >> 5;
+		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
 	} while (*index);
 	*index = -1;
@@ -153,8 +154,10 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		return PTR_ERR(mailbox);
 	mgm = mailbox->buf;
-	if (down_interruptible(&dev->mcg_table.sem))
-		return -EINTR;
+	if (down_interruptible(&dev->mcg_table.sem)) {
+		err = -EINTR;
+		goto err_sem;
+	}
 	err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
 	if (err)
@@ -181,9 +184,8 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 			err = -EINVAL;
 			goto out;
 		}
+		memset(mgm, 0, sizeof *mgm);
 		memcpy(mgm->gid, gid->raw, 16);
-		mgm->next_gid_index = 0;
 	}
@@ -209,6 +211,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	if (status) {
 		mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
 		err = -EINVAL;
+		goto out;
 	}
 	if (!link)
@@ -223,7 +226,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		goto out;
 	}
-	mgm->next_gid_index = cpu_to_be32(index << 5);
+	mgm->next_gid_index = cpu_to_be32(index << 6);
 	err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
 	if (err)
@@ -234,7 +237,12 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	}
 out:
+	if (err && link && index != -1) {
+		BUG_ON(index < dev->limits.num_mgms);
+		mthca_free(&dev->mcg_table.alloc, index);
+	}
 	up(&dev->mcg_table.sem);
+err_sem:
 	mthca_free_mailbox(dev, mailbox);
 	return err;
 }
@@ -255,8 +263,10 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		return PTR_ERR(mailbox);
 	mgm = mailbox->buf;
-	if (down_interruptible(&dev->mcg_table.sem))
-		return -EINTR;
+	if (down_interruptible(&dev->mcg_table.sem)) {
+		err = -EINTR;
+		goto err_sem;
+	}
 	err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
 	if (err)
@@ -305,13 +315,11 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	if (i != 1)
 		goto out;
-	goto out;
 	if (prev == -1) {
 		/* Remove entry from MGM */
-		if (be32_to_cpu(mgm->next_gid_index) >> 5) {
-			err = mthca_READ_MGM(dev,
-					     be32_to_cpu(mgm->next_gid_index) >> 5,
-					     mailbox, &status);
+		int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6;
+		if (amgm_index_to_free) {
+			err = mthca_READ_MGM(dev, amgm_index_to_free,
+					     mailbox, &status);
 			if (err)
 				goto out;
@@ -332,9 +340,13 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 			err = -EINVAL;
 			goto out;
 		}
+		if (amgm_index_to_free) {
+			BUG_ON(amgm_index_to_free < dev->limits.num_mgms);
+			mthca_free(&dev->mcg_table.alloc, amgm_index_to_free);
+		}
 	} else {
 		/* Remove entry from AMGM */
-		index = be32_to_cpu(mgm->next_gid_index) >> 5;
+		int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
 		err = mthca_READ_MGM(dev, prev, mailbox, &status);
 		if (err)
 			goto out;
@@ -344,7 +356,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 			goto out;
 		}
-		mgm->next_gid_index = cpu_to_be32(index << 5);
+		mgm->next_gid_index = cpu_to_be32(curr_next_index << 6);
 		err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
 		if (err)
@@ -354,10 +366,13 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 			err = -EINVAL;
 			goto out;
 		}
+		BUG_ON(index < dev->limits.num_mgms);
+		mthca_free(&dev->mcg_table.alloc, index);
 	}
 out:
 	up(&dev->mcg_table.sem);
+err_sem:
 	mthca_free_mailbox(dev, mailbox);
 	return err;
 }
@@ -365,11 +380,12 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 int __devinit mthca_init_mcg_table(struct mthca_dev *dev)
 {
 	int err;
+	int table_size = dev->limits.num_mgms + dev->limits.num_amgms;
 	err = mthca_alloc_init(&dev->mcg_table.alloc,
-			       dev->limits.num_amgms,
-			       dev->limits.num_amgms - 1,
-			       0);
+			       table_size,
+			       table_size - 1,
+			       dev->limits.num_mgms);
 	if (err)
 		return err;
...
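Two things are going on in the MCG hunks: the index of the next AMGM entry is now packed into next_gid_index with a shift of 6 rather than 5, and the MCG allocator is initialized over num_mgms + num_amgms entries with the first num_mgms reserved, which is what the new BUG_ON(index < dev->limits.num_mgms) checks before an entry is freed. The sketch below only shows the pack/unpack pair implied by the new shift; everything else about the field layout is assumed:

```c
#include <stdio.h>
#include <stdint.h>

/* next_gid_index keeps the index of the next entry in its upper bits;
 * the patch uses bit 6 as the low end of that field (it was 5 before). */
#define NEXT_GID_INDEX_SHIFT 6

static uint32_t pack_next_index(uint32_t index)
{
	return index << NEXT_GID_INDEX_SHIFT;
}

static uint32_t unpack_next_index(uint32_t next_gid_index)
{
	return next_gid_index >> NEXT_GID_INDEX_SHIFT;
}

int main(void)
{
	uint32_t index = 42;
	uint32_t field = pack_next_index(index);

	printf("index %u -> field 0x%08x -> index %u\n",
	       index, field, unpack_next_index(field));
	return 0;
}
```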
@@ -233,7 +233,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj)
 	for (i = 0; i < chunk->npages; ++i) {
 		if (chunk->mem[i].length >= offset) {
 			page = chunk->mem[i].page;
-			break;
+			goto out;
 		}
 		offset -= chunk->mem[i].length;
 	}
@@ -485,6 +485,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
 			put_page(db_tab->page[i].mem.page);
 		}
 	}
+	kfree(db_tab);
 }
 int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
...
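mthca_table_find() previously used break, which leaves only the inner page loop, so the outer chunk loop kept iterating and could disturb the offset and page it had just found; goto out exits both loops at once (and mthca_cleanup_user_db_tab() now frees the db_tab allocation it owns). A tiny demo of the break-versus-goto difference in nested loops:

```c
#include <stdio.h>

int main(void)
{
	int found_break = -1, found_goto = -1;

	/* 'break' only leaves the inner loop, so the outer loop keeps
	 * going and overwrites the result found earlier. */
	for (int chunk = 0; chunk < 3; ++chunk)
		for (int i = 0; i < 3; ++i)
			if (i == 1) {
				found_break = chunk * 10 + i;
				break;          /* inner loop only */
			}

	for (int chunk = 0; chunk < 3; ++chunk)
		for (int i = 0; i < 3; ++i)
			if (i == 1) {
				found_goto = chunk * 10 + i;
				goto out;       /* leaves both loops */
			}
out:
	printf("break: %d, goto: %d\n", found_break, found_goto);
	return 0;
}
```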
[The diff for one file in this commit is collapsed and not shown.]
@@ -201,7 +201,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 	if (mthca_is_memfree(dev))
 		srq->max = roundup_pow_of_two(srq->max + 1);
-	ds = min(64UL,
+	ds = max(64UL,
 		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
 				    srq->max_gs * sizeof (struct mthca_data_seg)));
 	srq->wqe_shift = long_log2(ds);
...
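In mthca_alloc_srq() the descriptor stride ds is meant to be at least 64 bytes and at least big enough for the next segment plus max_gs data segments; min() capped it at 64 and so could undersize the WQE for larger max_gs, which max() fixes. A quick check of the arithmetic, assuming 16-byte segment sizes:

```c
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

static unsigned long roundup_pow_of_two(unsigned long x)
{
	unsigned long r = 1;
	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	/* Assumed segment sizes for illustration (16 bytes each). */
	unsigned long next_seg = 16, data_seg = 16;
	unsigned long max_gs = 8;          /* scatter entries per WQE */

	unsigned long need = roundup_pow_of_two(next_seg + max_gs * data_seg);

	printf("required=%lu, min(64, need)=%lu (too small), max(64, need)=%lu\n",
	       need, min(64UL, need), max(64UL, need));
	return 0;
}
```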