Commit 373d9915 authored by Ralph Campbell, committed by Roland Dreier

IB/ipath: Performance improvements via mmap of queues

Improve performance of userspace post receive, post SRQ receive, and
poll CQ operations for ipath by allowing userspace to directly mmap()
receive queues and completion queues.  This eliminates the copying
between userspace and the kernel in the data path.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 9bc57e2d
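
The scheme the patch implements: each completion queue and receive queue is allocated as a single vmalloc'd region holding the head index, the tail index, and the entry array, and that region can be mmap()ed by the process that created it. For a CQ the kernel advances head and userspace advances tail; for an RQ/SRQ the roles are reversed. Below is a minimal sketch (not part of the patch) of the shared CQ layout and a direct userspace poll, using simplified stand-in types that mirror the ipath_cq_wc structure introduced later in this diff.

```c
#include <stdint.h>

/*
 * Simplified stand-in for the shared CQ region (see struct ipath_cq_wc
 * in ipath_verbs.h below).  The kernel fills queue[head] and advances
 * head; userspace copies queue[tail] and advances tail.  "ncqe" plays
 * the role of ibcq.cqe, and struct sample_wc is a cut-down ib_wc.
 */
struct sample_wc {
	uint64_t wr_id;
	uint32_t status;
	uint32_t byte_len;
};

struct sample_cq_wc {
	uint32_t head;			/* next entry the kernel will fill */
	uint32_t tail;			/* next entry userspace will poll  */
	struct sample_wc queue[1];	/* really ncqe + 1 entries         */
};

/* Poll up to nwc completions straight out of the mapped region. */
static int sample_poll_cq(struct sample_cq_wc *wc, uint32_t ncqe,
			  struct sample_wc *out, int nwc)
{
	int n = 0;

	while (n < nwc && wc->tail != wc->head) {
		out[n++] = wc->queue[wc->tail];
		if (wc->tail >= ncqe)	/* wrap, as ipath_poll_cq() does */
			wc->tail = 0;
		else
			wc->tail++;
	}
	return n;
}
```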
...@@ -25,6 +25,7 @@ ib_ipath-y := \ ...@@ -25,6 +25,7 @@ ib_ipath-y := \
ipath_cq.o \ ipath_cq.o \
ipath_keys.o \ ipath_keys.o \
ipath_mad.o \ ipath_mad.o \
ipath_mmap.o \
ipath_mr.o \ ipath_mr.o \
ipath_qp.o \ ipath_qp.o \
ipath_rc.o \ ipath_rc.o \
......
...@@ -42,20 +42,28 @@ ...@@ -42,20 +42,28 @@
* @entry: work completion entry to add * @entry: work completion entry to add
* @sig: true if @entry is a solicitated entry * @sig: true if @entry is a solicitated entry
* *
* This may be called with one of the qp->s_lock or qp->r_rq.lock held. * This may be called with qp->s_lock held.
*/ */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{ {
struct ipath_cq_wc *wc = cq->queue;
unsigned long flags; unsigned long flags;
u32 head;
u32 next; u32 next;
spin_lock_irqsave(&cq->lock, flags); spin_lock_irqsave(&cq->lock, flags);
if (cq->head == cq->ibcq.cqe) /*
* Note that the head pointer might be writable by user processes.
* Take care to verify it is a sane value.
*/
head = wc->head;
if (head >= (unsigned) cq->ibcq.cqe) {
head = cq->ibcq.cqe;
next = 0; next = 0;
else } else
next = cq->head + 1; next = head + 1;
if (unlikely(next == cq->tail)) { if (unlikely(next == wc->tail)) {
spin_unlock_irqrestore(&cq->lock, flags); spin_unlock_irqrestore(&cq->lock, flags);
if (cq->ibcq.event_handler) { if (cq->ibcq.event_handler) {
struct ib_event ev; struct ib_event ev;
...@@ -67,8 +75,8 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) ...@@ -67,8 +75,8 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
} }
return; return;
} }
cq->queue[cq->head] = *entry; wc->queue[head] = *entry;
cq->head = next; wc->head = next;
if (cq->notify == IB_CQ_NEXT_COMP || if (cq->notify == IB_CQ_NEXT_COMP ||
(cq->notify == IB_CQ_SOLICITED && solicited)) { (cq->notify == IB_CQ_SOLICITED && solicited)) {
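
Since the indices now live in user-writable memory, the kernel clamps them before use, as the comment in ipath_cq_enter() above notes. A standalone sketch of that clamp-then-produce pattern follows; the ring type and the overflow handling are illustrative, not the driver's, and the driver performs all of this under cq->lock.

```c
#include <stdint.h>

struct sample_ring {
	uint32_t head;
	uint32_t tail;
	uint64_t slot[1];		/* really nent + 1 entries */
};

static int sample_ring_produce(struct sample_ring *r, uint32_t nent,
			       uint64_t value)
{
	uint32_t head = r->head;	/* user-writable: may be garbage */
	uint32_t next;

	if (head >= nent)		/* clamp before trusting it */
		head = nent;
	next = (head == nent) ? 0 : head + 1;

	if (next == r->tail)		/* ring full: report overflow */
		return -1;

	r->slot[head] = value;		/* fill the slot ...           */
	r->head = next;			/* ... then publish the head   */
	return 0;
}
```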
...@@ -101,19 +109,20 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) ...@@ -101,19 +109,20 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{ {
struct ipath_cq *cq = to_icq(ibcq); struct ipath_cq *cq = to_icq(ibcq);
struct ipath_cq_wc *wc = cq->queue;
unsigned long flags; unsigned long flags;
int npolled; int npolled;
spin_lock_irqsave(&cq->lock, flags); spin_lock_irqsave(&cq->lock, flags);
for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
if (cq->tail == cq->head) if (wc->tail == wc->head)
break; break;
*entry = cq->queue[cq->tail]; *entry = wc->queue[wc->tail];
if (cq->tail == cq->ibcq.cqe) if (wc->tail >= cq->ibcq.cqe)
cq->tail = 0; wc->tail = 0;
else else
cq->tail++; wc->tail++;
} }
spin_unlock_irqrestore(&cq->lock, flags); spin_unlock_irqrestore(&cq->lock, flags);
...@@ -160,38 +169,74 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, ...@@ -160,38 +169,74 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
{ {
struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_ibdev *dev = to_idev(ibdev);
struct ipath_cq *cq; struct ipath_cq *cq;
struct ib_wc *wc; struct ipath_cq_wc *wc;
struct ib_cq *ret; struct ib_cq *ret;
if (entries > ib_ipath_max_cqes) { if (entries > ib_ipath_max_cqes) {
ret = ERR_PTR(-EINVAL); ret = ERR_PTR(-EINVAL);
goto bail; goto done;
} }
if (dev->n_cqs_allocated == ib_ipath_max_cqs) { if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
ret = ERR_PTR(-ENOMEM); ret = ERR_PTR(-ENOMEM);
goto bail; goto done;
} }
/* /* Allocate the completion queue structure. */
* Need to use vmalloc() if we want to support large #s of
* entries.
*/
cq = kmalloc(sizeof(*cq), GFP_KERNEL); cq = kmalloc(sizeof(*cq), GFP_KERNEL);
if (!cq) { if (!cq) {
ret = ERR_PTR(-ENOMEM); ret = ERR_PTR(-ENOMEM);
goto bail; goto done;
} }
/* /*
* Need to use vmalloc() if we want to support large #s of entries. * Allocate the completion queue entries and head/tail pointers.
* This is allocated separately so that it can be resized and
* also mapped into user space.
* We need to use vmalloc() in order to support mmap and large
* numbers of entries.
*/ */
wc = vmalloc(sizeof(*wc) * (entries + 1)); wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * entries);
if (!wc) { if (!wc) {
kfree(cq);
ret = ERR_PTR(-ENOMEM); ret = ERR_PTR(-ENOMEM);
goto bail; goto bail_cq;
}
/*
* Return the address of the WC as the offset to mmap.
* See ipath_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
struct ipath_mmap_info *ip;
__u64 offset = (__u64) wc;
int err;
err = ib_copy_to_udata(udata, &offset, sizeof(offset));
if (err) {
ret = ERR_PTR(err);
goto bail_wc;
}
/* Allocate info for ipath_mmap(). */
ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wc;
} }
cq->ip = ip;
ip->context = context;
ip->obj = wc;
kref_init(&ip->ref);
ip->mmap_cnt = 0;
ip->size = PAGE_ALIGN(sizeof(*wc) +
sizeof(struct ib_wc) * entries);
spin_lock_irq(&dev->pending_lock);
ip->next = dev->pending_mmaps;
dev->pending_mmaps = ip;
spin_unlock_irq(&dev->pending_lock);
} else
cq->ip = NULL;
/* /*
* ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
* The number of entries should be >= the number requested or return * The number of entries should be >= the number requested or return
...@@ -202,15 +247,22 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, ...@@ -202,15 +247,22 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
cq->triggered = 0; cq->triggered = 0;
spin_lock_init(&cq->lock); spin_lock_init(&cq->lock);
tasklet_init(&cq->comptask, send_complete, (unsigned long)cq); tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
cq->head = 0; wc->head = 0;
cq->tail = 0; wc->tail = 0;
cq->queue = wc; cq->queue = wc;
ret = &cq->ibcq; ret = &cq->ibcq;
dev->n_cqs_allocated++; dev->n_cqs_allocated++;
goto done;
bail: bail_wc:
vfree(wc);
bail_cq:
kfree(cq);
done:
return ret; return ret;
} }
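
The allocation pattern above repeats for QPs and SRQs later in the patch: one vmalloc_user() region sized for the header plus the entries, the kernel address handed back through udata as the mmap offset token, and an ipath_mmap_info queued on dev->pending_mmaps until the matching mmap() arrives. A condensed sketch under the same kernel-API assumptions; error unwinding, the kernel-only (udata == NULL) case, and the kref release callback (vfree + kfree, as in ipath_release_mmap_info) are omitted for brevity.

```c
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>

/* Mirrors struct ipath_mmap_info from ipath_verbs.h below. */
struct mmap_info {
	struct mmap_info *next;
	struct ib_ucontext *context;
	void *obj;
	struct kref ref;
	unsigned size;
};

/*
 * Allocate a shared ring of nent entries plus its header, tell the
 * creating process which offset to mmap(), and park the bookkeeping
 * on the device's pending list until the mmap() call arrives.
 * The caller reads the ring through ip->obj and keeps ip (e.g. cq->ip)
 * so destroy can kref_put() it later.
 */
static struct mmap_info *alloc_shared_ring(size_t hdr_size, size_t entry_size,
					   unsigned nent,
					   struct ib_ucontext *context,
					   struct ib_udata *udata,
					   struct mmap_info **pending,
					   spinlock_t *lock)
{
	void *ring = vmalloc_user(hdr_size + entry_size * nent);
	struct mmap_info *ip;
	__u64 offset = (__u64) (unsigned long) ring;

	if (!ring)
		return NULL;

	/* The kernel address doubles as the mmap offset token. */
	if (ib_copy_to_udata(udata, &offset, sizeof(offset)))
		goto err;

	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
	if (!ip)
		goto err;

	ip->context = context;
	ip->obj = ring;
	ip->size = PAGE_ALIGN(hdr_size + entry_size * nent);
	kref_init(&ip->ref);

	spin_lock_irq(lock);
	ip->next = *pending;
	*pending = ip;
	spin_unlock_irq(lock);
	return ip;

err:
	vfree(ring);
	return NULL;
}
```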
...@@ -229,6 +281,9 @@ int ipath_destroy_cq(struct ib_cq *ibcq) ...@@ -229,6 +281,9 @@ int ipath_destroy_cq(struct ib_cq *ibcq)
tasklet_kill(&cq->comptask); tasklet_kill(&cq->comptask);
dev->n_cqs_allocated--; dev->n_cqs_allocated--;
if (cq->ip)
kref_put(&cq->ip->ref, ipath_release_mmap_info);
else
vfree(cq->queue); vfree(cq->queue);
kfree(cq); kfree(cq);
...@@ -253,7 +308,7 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) ...@@ -253,7 +308,7 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
spin_lock_irqsave(&cq->lock, flags); spin_lock_irqsave(&cq->lock, flags);
/* /*
* Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
* any other transitions. * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
*/ */
if (cq->notify != IB_CQ_NEXT_COMP) if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = notify; cq->notify = notify;
...@@ -264,46 +319,81 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) ...@@ -264,46 +319,81 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{ {
struct ipath_cq *cq = to_icq(ibcq); struct ipath_cq *cq = to_icq(ibcq);
struct ib_wc *wc, *old_wc; struct ipath_cq_wc *old_wc = cq->queue;
u32 n; struct ipath_cq_wc *wc;
u32 head, tail, n;
int ret; int ret;
/* /*
* Need to use vmalloc() if we want to support large #s of entries. * Need to use vmalloc() if we want to support large #s of entries.
*/ */
wc = vmalloc(sizeof(*wc) * (cqe + 1)); wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * cqe);
if (!wc) { if (!wc) {
ret = -ENOMEM; ret = -ENOMEM;
goto bail; goto bail;
} }
/*
* Return the address of the WC as the offset to mmap.
* See ipath_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
__u64 offset = (__u64) wc;
ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
if (ret)
goto bail;
}
spin_lock_irq(&cq->lock); spin_lock_irq(&cq->lock);
if (cq->head < cq->tail) /*
n = cq->ibcq.cqe + 1 + cq->head - cq->tail; * Make sure head and tail are sane since they
* might be user writable.
*/
head = old_wc->head;
if (head > (u32) cq->ibcq.cqe)
head = (u32) cq->ibcq.cqe;
tail = old_wc->tail;
if (tail > (u32) cq->ibcq.cqe)
tail = (u32) cq->ibcq.cqe;
if (head < tail)
n = cq->ibcq.cqe + 1 + head - tail;
else else
n = cq->head - cq->tail; n = head - tail;
if (unlikely((u32)cqe < n)) { if (unlikely((u32)cqe < n)) {
spin_unlock_irq(&cq->lock); spin_unlock_irq(&cq->lock);
vfree(wc); vfree(wc);
ret = -EOVERFLOW; ret = -EOVERFLOW;
goto bail; goto bail;
} }
for (n = 0; cq->tail != cq->head; n++) { for (n = 0; tail != head; n++) {
wc[n] = cq->queue[cq->tail]; wc->queue[n] = old_wc->queue[tail];
if (cq->tail == cq->ibcq.cqe) if (tail == (u32) cq->ibcq.cqe)
cq->tail = 0; tail = 0;
else else
cq->tail++; tail++;
} }
cq->ibcq.cqe = cqe; cq->ibcq.cqe = cqe;
cq->head = n; wc->head = n;
cq->tail = 0; wc->tail = 0;
old_wc = cq->queue;
cq->queue = wc; cq->queue = wc;
spin_unlock_irq(&cq->lock); spin_unlock_irq(&cq->lock);
vfree(old_wc); vfree(old_wc);
if (cq->ip) {
struct ipath_ibdev *dev = to_idev(ibcq->device);
struct ipath_mmap_info *ip = cq->ip;
ip->obj = wc;
ip->size = PAGE_ALIGN(sizeof(*wc) +
sizeof(struct ib_wc) * cqe);
spin_lock_irq(&dev->pending_lock);
ip->next = dev->pending_mmaps;
dev->pending_mmaps = ip;
spin_unlock_irq(&dev->pending_lock);
}
ret = 0; ret = 0;
bail: bail:
......
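
Both the resize path above and the AETH credit computation later in the patch need the same quantity: how many entries sit between tail and head in a circular buffer, with wraparound. A small worked helper, where nslots is ibcq.cqe + 1 for the CQ and r_rq.size for a receive queue:

```c
#include <stdint.h>

/*
 * Number of entries currently queued in a circular buffer of "nslots"
 * slots, given already-sanitized head and tail indices.  This is the
 * computation ipath_resize_cq() does inline above and
 * ipath_compute_aeth() repeats for RQ credits.
 */
static uint32_t ring_count(uint32_t head, uint32_t tail, uint32_t nslots)
{
	if (head >= tail)
		return head - tail;
	return nslots + head - tail;	/* head has wrapped around */
}

/* Example: nslots = 8, head = 2, tail = 6  ->  8 + 2 - 6 = 4 entries. */
```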
/*
* Copyright (c) 2006 QLogic, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <asm/pgtable.h>
#include "ipath_verbs.h"
/**
* ipath_release_mmap_info - free mmap info structure
* @ref: a pointer to the kref within struct ipath_mmap_info
*/
void ipath_release_mmap_info(struct kref *ref)
{
struct ipath_mmap_info *ip =
container_of(ref, struct ipath_mmap_info, ref);
vfree(ip->obj);
kfree(ip);
}
/*
* open and close keep track of how many times the CQ is mapped,
* to avoid releasing it.
*/
static void ipath_vma_open(struct vm_area_struct *vma)
{
struct ipath_mmap_info *ip = vma->vm_private_data;
kref_get(&ip->ref);
ip->mmap_cnt++;
}
static void ipath_vma_close(struct vm_area_struct *vma)
{
struct ipath_mmap_info *ip = vma->vm_private_data;
ip->mmap_cnt--;
kref_put(&ip->ref, ipath_release_mmap_info);
}
static struct vm_operations_struct ipath_vm_ops = {
.open = ipath_vma_open,
.close = ipath_vma_close,
};
/**
* ipath_mmap - create a new mmap region
* @context: the IB user context of the process making the mmap() call
* @vma: the VMA to be initialized
* Return zero if the mmap is OK. Otherwise, return an errno.
*/
int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct ipath_ibdev *dev = to_idev(context->device);
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long size = vma->vm_end - vma->vm_start;
struct ipath_mmap_info *ip, **pp;
int ret = -EINVAL;
/*
* Search the device's list of objects waiting for a mmap call.
* Normally, this list is very short since a call to create a
* CQ, QP, or SRQ is soon followed by a call to mmap().
*/
spin_lock_irq(&dev->pending_lock);
for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) {
/* Only the creator is allowed to mmap the object */
if (context != ip->context || (void *) offset != ip->obj)
continue;
/* Don't allow a mmap larger than the object. */
if (size > ip->size)
break;
*pp = ip->next;
spin_unlock_irq(&dev->pending_lock);
ret = remap_vmalloc_range(vma, ip->obj, 0);
if (ret)
goto done;
vma->vm_ops = &ipath_vm_ops;
vma->vm_private_data = ip;
ipath_vma_open(vma);
goto done;
}
spin_unlock_irq(&dev->pending_lock);
done:
return ret;
}
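
On the other side of this handshake, userspace simply mmap()s the uverbs device file with the offset token the create call returned; ipath_mmap() above matches that token against the pending list, checks that the caller is the creator and that the requested size fits, and remaps the vmalloc'd pages. A hedged userspace sketch follows; how the offset reaches userspace is defined by the driver-specific ABI and is an assumption here.

```c
/* The offset token is a 64-bit kernel address, so force a 64-bit off_t. */
#define _FILE_OFFSET_BITS 64

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

/*
 * Map a queue the driver offered for mmap.  "verbs_fd" is the open
 * uverbs device file descriptor, "offset" is the __u64 returned in the
 * driver-specific create response, and "size" must not exceed the
 * object size (ipath_mmap() rejects larger requests).
 */
static void *map_queue(int verbs_fd, uint64_t offset, size_t size)
{
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       verbs_fd, (off_t) offset);

	if (p == MAP_FAILED) {
		perror("mmap of shared queue");
		return NULL;
	}
	return p;
}
```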
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include "ipath_verbs.h" #include "ipath_verbs.h"
#include "ipath_common.h" #include "ipath_kernel.h"
#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE) #define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) #define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
...@@ -44,19 +44,6 @@ ...@@ -44,19 +44,6 @@
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \ #define find_next_offset(map, off) find_next_zero_bit((map)->page, \
BITS_PER_PAGE, off) BITS_PER_PAGE, off)
#define TRANS_INVALID 0
#define TRANS_ANY2RST 1
#define TRANS_RST2INIT 2
#define TRANS_INIT2INIT 3
#define TRANS_INIT2RTR 4
#define TRANS_RTR2RTS 5
#define TRANS_RTS2RTS 6
#define TRANS_SQERR2RTS 7
#define TRANS_ANY2ERR 8
#define TRANS_RTS2SQD 9 /* XXX Wait for expected ACKs & signal event */
#define TRANS_SQD2SQD 10 /* error if not drained & parameter change */
#define TRANS_SQD2RTS 11 /* error if not drained */
/* /*
* Convert the AETH credit code into the number of credits. * Convert the AETH credit code into the number of credits.
*/ */
...@@ -355,8 +342,10 @@ static void ipath_reset_qp(struct ipath_qp *qp) ...@@ -355,8 +342,10 @@ static void ipath_reset_qp(struct ipath_qp *qp)
qp->s_last = 0; qp->s_last = 0;
qp->s_ssn = 1; qp->s_ssn = 1;
qp->s_lsn = 0; qp->s_lsn = 0;
qp->r_rq.head = 0; if (qp->r_rq.wq) {
qp->r_rq.tail = 0; qp->r_rq.wq->head = 0;
qp->r_rq.wq->tail = 0;
}
qp->r_reuse_sge = 0; qp->r_reuse_sge = 0;
} }
...@@ -410,15 +399,32 @@ void ipath_error_qp(struct ipath_qp *qp) ...@@ -410,15 +399,32 @@ void ipath_error_qp(struct ipath_qp *qp)
qp->s_hdrwords = 0; qp->s_hdrwords = 0;
qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
wc.opcode = IB_WC_RECV; if (qp->r_rq.wq) {
struct ipath_rwq *wq;
u32 head;
u32 tail;
spin_lock(&qp->r_rq.lock); spin_lock(&qp->r_rq.lock);
while (qp->r_rq.tail != qp->r_rq.head) {
wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id; /* sanity check pointers before trusting them */
if (++qp->r_rq.tail >= qp->r_rq.size) wq = qp->r_rq.wq;
qp->r_rq.tail = 0; head = wq->head;
if (head >= qp->r_rq.size)
head = 0;
tail = wq->tail;
if (tail >= qp->r_rq.size)
tail = 0;
wc.opcode = IB_WC_RECV;
while (tail != head) {
wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
if (++tail >= qp->r_rq.size)
tail = 0;
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
} }
wq->tail = tail;
spin_unlock(&qp->r_rq.lock); spin_unlock(&qp->r_rq.lock);
}
} }
/** /**
...@@ -544,7 +550,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ...@@ -544,7 +550,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->dest_qp_num = qp->remote_qpn; attr->dest_qp_num = qp->remote_qpn;
attr->qp_access_flags = qp->qp_access_flags; attr->qp_access_flags = qp->qp_access_flags;
attr->cap.max_send_wr = qp->s_size - 1; attr->cap.max_send_wr = qp->s_size - 1;
attr->cap.max_recv_wr = qp->r_rq.size - 1; attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
attr->cap.max_send_sge = qp->s_max_sge; attr->cap.max_send_sge = qp->s_max_sge;
attr->cap.max_recv_sge = qp->r_rq.max_sge; attr->cap.max_recv_sge = qp->r_rq.max_sge;
attr->cap.max_inline_data = 0; attr->cap.max_inline_data = 0;
...@@ -597,13 +603,23 @@ __be32 ipath_compute_aeth(struct ipath_qp *qp) ...@@ -597,13 +603,23 @@ __be32 ipath_compute_aeth(struct ipath_qp *qp)
} else { } else {
u32 min, max, x; u32 min, max, x;
u32 credits; u32 credits;
struct ipath_rwq *wq = qp->r_rq.wq;
u32 head;
u32 tail;
/* sanity check pointers before trusting them */
head = wq->head;
if (head >= qp->r_rq.size)
head = 0;
tail = wq->tail;
if (tail >= qp->r_rq.size)
tail = 0;
/* /*
* Compute the number of credits available (RWQEs). * Compute the number of credits available (RWQEs).
* XXX Not holding the r_rq.lock here so there is a small * XXX Not holding the r_rq.lock here so there is a small
* chance that the pair of reads are not atomic. * chance that the pair of reads are not atomic.
*/ */
credits = qp->r_rq.head - qp->r_rq.tail; credits = head - tail;
if ((int)credits < 0) if ((int)credits < 0)
credits += qp->r_rq.size; credits += qp->r_rq.size;
/* /*
...@@ -680,27 +696,37 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, ...@@ -680,27 +696,37 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
case IB_QPT_UD: case IB_QPT_UD:
case IB_QPT_SMI: case IB_QPT_SMI:
case IB_QPT_GSI: case IB_QPT_GSI:
qp = kmalloc(sizeof(*qp), GFP_KERNEL); sz = sizeof(*qp);
if (init_attr->srq) {
struct ipath_srq *srq = to_isrq(init_attr->srq);
sz += sizeof(*qp->r_sg_list) *
srq->rq.max_sge;
} else
sz += sizeof(*qp->r_sg_list) *
init_attr->cap.max_recv_sge;
qp = kmalloc(sz, GFP_KERNEL);
if (!qp) { if (!qp) {
vfree(swq);
ret = ERR_PTR(-ENOMEM); ret = ERR_PTR(-ENOMEM);
goto bail; goto bail_swq;
} }
if (init_attr->srq) { if (init_attr->srq) {
sz = 0;
qp->r_rq.size = 0; qp->r_rq.size = 0;
qp->r_rq.max_sge = 0; qp->r_rq.max_sge = 0;
qp->r_rq.wq = NULL; qp->r_rq.wq = NULL;
init_attr->cap.max_recv_wr = 0;
init_attr->cap.max_recv_sge = 0;
} else { } else {
qp->r_rq.size = init_attr->cap.max_recv_wr + 1; qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
qp->r_rq.max_sge = init_attr->cap.max_recv_sge; qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
sz = (sizeof(struct ipath_sge) * qp->r_rq.max_sge) + sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
sizeof(struct ipath_rwqe); sizeof(struct ipath_rwqe);
qp->r_rq.wq = vmalloc(qp->r_rq.size * sz); qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
qp->r_rq.size * sz);
if (!qp->r_rq.wq) { if (!qp->r_rq.wq) {
kfree(qp);
vfree(swq);
ret = ERR_PTR(-ENOMEM); ret = ERR_PTR(-ENOMEM);
goto bail; goto bail_qp;
} }
} }
...@@ -726,12 +752,10 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, ...@@ -726,12 +752,10 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
err = ipath_alloc_qpn(&dev->qp_table, qp, err = ipath_alloc_qpn(&dev->qp_table, qp,
init_attr->qp_type); init_attr->qp_type);
if (err) { if (err) {
vfree(swq);
vfree(qp->r_rq.wq);
kfree(qp);
ret = ERR_PTR(err); ret = ERR_PTR(err);
goto bail; goto bail_rwq;
} }
qp->ip = NULL;
ipath_reset_qp(qp); ipath_reset_qp(qp);
/* Tell the core driver that the kernel SMA is present. */ /* Tell the core driver that the kernel SMA is present. */
...@@ -748,8 +772,51 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, ...@@ -748,8 +772,51 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
init_attr->cap.max_inline_data = 0; init_attr->cap.max_inline_data = 0;
/*
* Return the address of the RWQ as the offset to mmap.
* See ipath_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
struct ipath_mmap_info *ip;
__u64 offset = (__u64) qp->r_rq.wq;
int err;
err = ib_copy_to_udata(udata, &offset, sizeof(offset));
if (err) {
ret = ERR_PTR(err);
goto bail_rwq;
}
if (qp->r_rq.wq) {
/* Allocate info for ipath_mmap(). */
ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_rwq;
}
qp->ip = ip;
ip->context = ibpd->uobject->context;
ip->obj = qp->r_rq.wq;
kref_init(&ip->ref);
ip->mmap_cnt = 0;
ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
qp->r_rq.size * sz);
spin_lock_irq(&dev->pending_lock);
ip->next = dev->pending_mmaps;
dev->pending_mmaps = ip;
spin_unlock_irq(&dev->pending_lock);
}
}
ret = &qp->ibqp; ret = &qp->ibqp;
goto bail;
bail_rwq:
vfree(qp->r_rq.wq);
bail_qp:
kfree(qp);
bail_swq:
vfree(swq);
bail: bail:
return ret; return ret;
} }
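
Note how ipath_create_qp() above sizes the QP allocation to include a trailing r_sg_list[] array of verified SGEs, so the fixed struct and the variable part come from a single kmalloc(). A generic sketch of that trailing-array sizing; the stub types are illustrative.

```c
#include <linux/slab.h>

/* Stand-ins for struct ipath_sge / struct ipath_qp (illustrative). */
struct sge_stub {
	void *vaddr;
	unsigned length;
};

struct qp_stub {
	int fixed_fields;		/* ... the rest of the QP state ... */
	struct sge_stub r_sg_list[0];	/* verified SGEs follow the struct */
};

/* One allocation covers the fixed struct plus max_sge trailing entries. */
static struct qp_stub *alloc_qp_stub(unsigned max_sge)
{
	size_t sz = sizeof(struct qp_stub) +
		    sizeof(struct sge_stub) * max_sge;

	return kmalloc(sz, GFP_KERNEL);
}
```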
...@@ -773,11 +840,9 @@ int ipath_destroy_qp(struct ib_qp *ibqp) ...@@ -773,11 +840,9 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
if (qp->ibqp.qp_type == IB_QPT_SMI) if (qp->ibqp.qp_type == IB_QPT_SMI)
ipath_layer_set_verbs_flags(dev->dd, 0); ipath_layer_set_verbs_flags(dev->dd, 0);
spin_lock_irqsave(&qp->r_rq.lock, flags); spin_lock_irqsave(&qp->s_lock, flags);
spin_lock(&qp->s_lock);
qp->state = IB_QPS_ERR; qp->state = IB_QPS_ERR;
spin_unlock(&qp->s_lock); spin_unlock_irqrestore(&qp->s_lock, flags);
spin_unlock_irqrestore(&qp->r_rq.lock, flags);
/* Stop the sending tasklet. */ /* Stop the sending tasklet. */
tasklet_kill(&qp->s_task); tasklet_kill(&qp->s_task);
...@@ -798,8 +863,11 @@ int ipath_destroy_qp(struct ib_qp *ibqp) ...@@ -798,8 +863,11 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
if (atomic_read(&qp->refcount) != 0) if (atomic_read(&qp->refcount) != 0)
ipath_free_qp(&dev->qp_table, qp); ipath_free_qp(&dev->qp_table, qp);
vfree(qp->s_wq); if (qp->ip)
kref_put(&qp->ip->ref, ipath_release_mmap_info);
else
vfree(qp->r_rq.wq); vfree(qp->r_rq.wq);
vfree(qp->s_wq);
kfree(qp); kfree(qp);
return 0; return 0;
} }
......
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
*/ */
#include "ipath_verbs.h" #include "ipath_verbs.h"
#include "ipath_common.h" #include "ipath_kernel.h"
/* /*
* Convert the AETH RNR timeout code into the number of milliseconds. * Convert the AETH RNR timeout code into the number of milliseconds.
...@@ -106,6 +106,54 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp) ...@@ -106,6 +106,54 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp)
spin_unlock_irqrestore(&dev->pending_lock, flags); spin_unlock_irqrestore(&dev->pending_lock, flags);
} }
static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe)
{
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
int user = to_ipd(qp->ibqp.pd)->user;
int i, j, ret;
struct ib_wc wc;
qp->r_len = 0;
for (i = j = 0; i < wqe->num_sge; i++) {
if (wqe->sg_list[i].length == 0)
continue;
/* Check LKEY */
if ((user && wqe->sg_list[i].lkey == 0) ||
!ipath_lkey_ok(&dev->lk_table,
&qp->r_sg_list[j], &wqe->sg_list[i],
IB_ACCESS_LOCAL_WRITE))
goto bad_lkey;
qp->r_len += wqe->sg_list[i].length;
j++;
}
qp->r_sge.sge = qp->r_sg_list[0];
qp->r_sge.sg_list = qp->r_sg_list + 1;
qp->r_sge.num_sge = j;
ret = 1;
goto bail;
bad_lkey:
wc.wr_id = wqe->wr_id;
wc.status = IB_WC_LOC_PROT_ERR;
wc.opcode = IB_WC_RECV;
wc.vendor_err = 0;
wc.byte_len = 0;
wc.imm_data = 0;
wc.qp_num = qp->ibqp.qp_num;
wc.src_qp = 0;
wc.wc_flags = 0;
wc.pkey_index = 0;
wc.slid = 0;
wc.sl = 0;
wc.dlid_path_bits = 0;
wc.port_num = 0;
/* Signal solicited completion event. */
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
ret = 0;
bail:
return ret;
}
/** /**
* ipath_get_rwqe - copy the next RWQE into the QP's RWQE * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
* @qp: the QP * @qp: the QP
...@@ -119,71 +167,71 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only) ...@@ -119,71 +167,71 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{ {
unsigned long flags; unsigned long flags;
struct ipath_rq *rq; struct ipath_rq *rq;
struct ipath_rwq *wq;
struct ipath_srq *srq; struct ipath_srq *srq;
struct ipath_rwqe *wqe; struct ipath_rwqe *wqe;
int ret = 1; void (*handler)(struct ib_event *, void *);
u32 tail;
int ret;
if (!qp->ibqp.srq) { if (qp->ibqp.srq) {
srq = to_isrq(qp->ibqp.srq);
handler = srq->ibsrq.event_handler;
rq = &srq->rq;
} else {
srq = NULL;
handler = NULL;
rq = &qp->r_rq; rq = &qp->r_rq;
spin_lock_irqsave(&rq->lock, flags);
if (unlikely(rq->tail == rq->head)) {
ret = 0;
goto done;
}
wqe = get_rwqe_ptr(rq, rq->tail);
qp->r_wr_id = wqe->wr_id;
if (!wr_id_only) {
qp->r_sge.sge = wqe->sg_list[0];
qp->r_sge.sg_list = wqe->sg_list + 1;
qp->r_sge.num_sge = wqe->num_sge;
qp->r_len = wqe->length;
}
if (++rq->tail >= rq->size)
rq->tail = 0;
goto done;
} }
srq = to_isrq(qp->ibqp.srq);
rq = &srq->rq;
spin_lock_irqsave(&rq->lock, flags); spin_lock_irqsave(&rq->lock, flags);
wq = rq->wq;
if (unlikely(rq->tail == rq->head)) { tail = wq->tail;
/* Validate tail before using it since it is user writable. */
if (tail >= rq->size)
tail = 0;
do {
if (unlikely(tail == wq->head)) {
spin_unlock_irqrestore(&rq->lock, flags);
ret = 0; ret = 0;
goto done; goto bail;
} }
wqe = get_rwqe_ptr(rq, rq->tail); wqe = get_rwqe_ptr(rq, tail);
if (++tail >= rq->size)
tail = 0;
} while (!wr_id_only && !init_sge(qp, wqe));
qp->r_wr_id = wqe->wr_id; qp->r_wr_id = wqe->wr_id;
if (!wr_id_only) { wq->tail = tail;
qp->r_sge.sge = wqe->sg_list[0];
qp->r_sge.sg_list = wqe->sg_list + 1; ret = 1;
qp->r_sge.num_sge = wqe->num_sge; if (handler) {
qp->r_len = wqe->length;
}
if (++rq->tail >= rq->size)
rq->tail = 0;
if (srq->ibsrq.event_handler) {
struct ib_event ev;
u32 n; u32 n;
if (rq->head < rq->tail) /*
n = rq->size + rq->head - rq->tail; * validate head pointer value and compute
* the number of remaining WQEs.
*/
n = wq->head;
if (n >= rq->size)
n = 0;
if (n < tail)
n += rq->size - tail;
else else
n = rq->head - rq->tail; n -= tail;
if (n < srq->limit) { if (n < srq->limit) {
struct ib_event ev;
srq->limit = 0; srq->limit = 0;
spin_unlock_irqrestore(&rq->lock, flags); spin_unlock_irqrestore(&rq->lock, flags);
ev.device = qp->ibqp.device; ev.device = qp->ibqp.device;
ev.element.srq = qp->ibqp.srq; ev.element.srq = qp->ibqp.srq;
ev.event = IB_EVENT_SRQ_LIMIT_REACHED; ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
srq->ibsrq.event_handler(&ev, handler(&ev, srq->ibsrq.srq_context);
srq->ibsrq.srq_context);
goto bail; goto bail;
} }
} }
done:
spin_unlock_irqrestore(&rq->lock, flags); spin_unlock_irqrestore(&rq->lock, flags);
bail: bail:
return ret; return ret;
} }
......
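
The rewritten ipath_get_rwqe() above validates the user-supplied tail, then walks the ring past any WQE that init_sge() rejects (each rejected entry gets a LOC_PROT_ERR flush completion) before publishing the new tail. The shape of that consume loop, as a standalone sketch with an illustrative validator callback:

```c
#include <stdint.h>
#include <stdbool.h>

struct ring_idx {
	uint32_t head;
	uint32_t tail;
};

/* Returns the accepted slot index, or -1 if the ring is empty. */
static int consume_valid(struct ring_idx *wq, uint32_t size,
			 bool (*validate)(uint32_t slot, void *arg), void *arg)
{
	uint32_t tail = wq->tail;
	uint32_t slot;

	if (tail >= size)			/* don't trust the shared index */
		tail = 0;

	do {
		if (tail == wq->head)		/* ring is empty */
			return -1;
		slot = tail;
		if (++tail >= size)
			tail = 0;
	} while (!validate(slot, arg));

	wq->tail = tail;			/* publish everything consumed */
	return (int) slot;
}
```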
...@@ -48,66 +48,39 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, ...@@ -48,66 +48,39 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr) struct ib_recv_wr **bad_wr)
{ {
struct ipath_srq *srq = to_isrq(ibsrq); struct ipath_srq *srq = to_isrq(ibsrq);
struct ipath_ibdev *dev = to_idev(ibsrq->device); struct ipath_rwq *wq;
unsigned long flags; unsigned long flags;
int ret; int ret;
for (; wr; wr = wr->next) { for (; wr; wr = wr->next) {
struct ipath_rwqe *wqe; struct ipath_rwqe *wqe;
u32 next; u32 next;
int i, j; int i;
if (wr->num_sge > srq->rq.max_sge) { if ((unsigned) wr->num_sge > srq->rq.max_sge) {
*bad_wr = wr; *bad_wr = wr;
ret = -ENOMEM; ret = -ENOMEM;
goto bail; goto bail;
} }
spin_lock_irqsave(&srq->rq.lock, flags); spin_lock_irqsave(&srq->rq.lock, flags);
next = srq->rq.head + 1; wq = srq->rq.wq;
next = wq->head + 1;
if (next >= srq->rq.size) if (next >= srq->rq.size)
next = 0; next = 0;
if (next == srq->rq.tail) { if (next == wq->tail) {
spin_unlock_irqrestore(&srq->rq.lock, flags); spin_unlock_irqrestore(&srq->rq.lock, flags);
*bad_wr = wr; *bad_wr = wr;
ret = -ENOMEM; ret = -ENOMEM;
goto bail; goto bail;
} }
wqe = get_rwqe_ptr(&srq->rq, srq->rq.head); wqe = get_rwqe_ptr(&srq->rq, wq->head);
wqe->wr_id = wr->wr_id; wqe->wr_id = wr->wr_id;
wqe->sg_list[0].mr = NULL; wqe->num_sge = wr->num_sge;
wqe->sg_list[0].vaddr = NULL; for (i = 0; i < wr->num_sge; i++)
wqe->sg_list[0].length = 0; wqe->sg_list[i] = wr->sg_list[i];
wqe->sg_list[0].sge_length = 0; wq->head = next;
wqe->length = 0;
for (i = 0, j = 0; i < wr->num_sge; i++) {
/* Check LKEY */
if (to_ipd(srq->ibsrq.pd)->user &&
wr->sg_list[i].lkey == 0) {
spin_unlock_irqrestore(&srq->rq.lock,
flags);
*bad_wr = wr;
ret = -EINVAL;
goto bail;
}
if (wr->sg_list[i].length == 0)
continue;
if (!ipath_lkey_ok(&dev->lk_table,
&wqe->sg_list[j],
&wr->sg_list[i],
IB_ACCESS_LOCAL_WRITE)) {
spin_unlock_irqrestore(&srq->rq.lock,
flags);
*bad_wr = wr;
ret = -EINVAL;
goto bail;
}
wqe->length += wr->sg_list[i].length;
j++;
}
wqe->num_sge = j;
srq->rq.head = next;
spin_unlock_irqrestore(&srq->rq.lock, flags); spin_unlock_irqrestore(&srq->rq.lock, flags);
} }
ret = 0; ret = 0;
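
With the LKEY checks deferred to init_sge() at consume time, posting a receive reduces to copying the ib_sge list into the next slot and bumping head, which is exactly what a userspace library can now do against the mmap'ed ring without entering the kernel. A hedged userspace sketch mirroring the ipath_rwq/ipath_rwqe layout; the slot stride must match the kernel's get_rwqe_ptr() computation, and a real library would also issue a write barrier before publishing head.

```c
#include <stdint.h>
#include <string.h>

struct u_sge  { uint64_t addr; uint32_t length; uint32_t lkey; };
struct u_rwqe { uint64_t wr_id; uint8_t num_sge; struct u_sge sg_list[]; };
struct u_rwq  { uint32_t head; uint32_t tail; /* WQEs follow the header */ };

static struct u_rwqe *u_get_rwqe(struct u_rwq *wq, uint32_t max_sge, uint32_t n)
{
	size_t sz = sizeof(struct u_rwqe) + max_sge * sizeof(struct u_sge);

	/* The WQE array starts right after the 8-byte head/tail header. */
	return (struct u_rwqe *) ((char *) (wq + 1) + n * sz);
}

static int u_post_recv(struct u_rwq *wq, uint32_t size, uint32_t max_sge,
		       uint64_t wr_id, const struct u_sge *sgl, unsigned nsge)
{
	uint32_t next = wq->head + 1;
	struct u_rwqe *wqe;

	if (nsge > max_sge)
		return -1;
	if (next >= size)
		next = 0;
	if (next == wq->tail)			/* ring full */
		return -1;

	wqe = u_get_rwqe(wq, max_sge, wq->head);
	wqe->wr_id = wr_id;
	wqe->num_sge = (uint8_t) nsge;
	memcpy(wqe->sg_list, sgl, nsge * sizeof(*sgl));

	/* A real library needs a write barrier before publishing head. */
	wq->head = next;
	return 0;
}
```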
...@@ -133,53 +106,95 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, ...@@ -133,53 +106,95 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
if (dev->n_srqs_allocated == ib_ipath_max_srqs) { if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
ret = ERR_PTR(-ENOMEM); ret = ERR_PTR(-ENOMEM);
goto bail; goto done;
} }
if (srq_init_attr->attr.max_wr == 0) { if (srq_init_attr->attr.max_wr == 0) {
ret = ERR_PTR(-EINVAL); ret = ERR_PTR(-EINVAL);
goto bail; goto done;
} }
if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) || if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
(srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) { (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
ret = ERR_PTR(-EINVAL); ret = ERR_PTR(-EINVAL);
goto bail; goto done;
} }
srq = kmalloc(sizeof(*srq), GFP_KERNEL); srq = kmalloc(sizeof(*srq), GFP_KERNEL);
if (!srq) { if (!srq) {
ret = ERR_PTR(-ENOMEM); ret = ERR_PTR(-ENOMEM);
goto bail; goto done;
} }
/* /*
* Need to use vmalloc() if we want to support large #s of entries. * Need to use vmalloc() if we want to support large #s of entries.
*/ */
srq->rq.size = srq_init_attr->attr.max_wr + 1; srq->rq.size = srq_init_attr->attr.max_wr + 1;
sz = sizeof(struct ipath_sge) * srq_init_attr->attr.max_sge + srq->rq.max_sge = srq_init_attr->attr.max_sge;
sz = sizeof(struct ib_sge) * srq->rq.max_sge +
sizeof(struct ipath_rwqe); sizeof(struct ipath_rwqe);
srq->rq.wq = vmalloc(srq->rq.size * sz); srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
if (!srq->rq.wq) { if (!srq->rq.wq) {
kfree(srq);
ret = ERR_PTR(-ENOMEM); ret = ERR_PTR(-ENOMEM);
goto bail; goto bail_srq;
} }
/*
* Return the address of the RWQ as the offset to mmap.
* See ipath_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
struct ipath_mmap_info *ip;
__u64 offset = (__u64) srq->rq.wq;
int err;
err = ib_copy_to_udata(udata, &offset, sizeof(offset));
if (err) {
ret = ERR_PTR(err);
goto bail_wq;
}
/* Allocate info for ipath_mmap(). */
ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wq;
}
srq->ip = ip;
ip->context = ibpd->uobject->context;
ip->obj = srq->rq.wq;
kref_init(&ip->ref);
ip->mmap_cnt = 0;
ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
srq->rq.size * sz);
spin_lock_irq(&dev->pending_lock);
ip->next = dev->pending_mmaps;
dev->pending_mmaps = ip;
spin_unlock_irq(&dev->pending_lock);
} else
srq->ip = NULL;
/* /*
* ib_create_srq() will initialize srq->ibsrq. * ib_create_srq() will initialize srq->ibsrq.
*/ */
spin_lock_init(&srq->rq.lock); spin_lock_init(&srq->rq.lock);
srq->rq.head = 0; srq->rq.wq->head = 0;
srq->rq.tail = 0; srq->rq.wq->tail = 0;
srq->rq.max_sge = srq_init_attr->attr.max_sge; srq->rq.max_sge = srq_init_attr->attr.max_sge;
srq->limit = srq_init_attr->attr.srq_limit; srq->limit = srq_init_attr->attr.srq_limit;
dev->n_srqs_allocated++;
ret = &srq->ibsrq; ret = &srq->ibsrq;
goto done;
dev->n_srqs_allocated++; bail_wq:
vfree(srq->rq.wq);
bail: bail_srq:
kfree(srq);
done:
return ret; return ret;
} }
...@@ -195,78 +210,123 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, ...@@ -195,78 +210,123 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct ipath_srq *srq = to_isrq(ibsrq); struct ipath_srq *srq = to_isrq(ibsrq);
unsigned long flags; int ret = 0;
int ret;
if (attr_mask & IB_SRQ_MAX_WR) if (attr_mask & IB_SRQ_MAX_WR) {
if ((attr->max_wr > ib_ipath_max_srq_wrs) || struct ipath_rwq *owq;
(attr->max_sge > srq->rq.max_sge)) { struct ipath_rwq *wq;
ret = -EINVAL; struct ipath_rwqe *p;
goto bail; u32 sz, size, n, head, tail;
}
if (attr_mask & IB_SRQ_LIMIT) /* Check that the requested sizes are below the limits. */
if (attr->srq_limit >= srq->rq.size) { if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
((attr_mask & IB_SRQ_LIMIT) ?
attr->srq_limit : srq->limit) > attr->max_wr) {
ret = -EINVAL; ret = -EINVAL;
goto bail; goto bail;
} }
if (attr_mask & IB_SRQ_MAX_WR) {
struct ipath_rwqe *wq, *p;
u32 sz, size, n;
sz = sizeof(struct ipath_rwqe) + sz = sizeof(struct ipath_rwqe) +
attr->max_sge * sizeof(struct ipath_sge); srq->rq.max_sge * sizeof(struct ib_sge);
size = attr->max_wr + 1; size = attr->max_wr + 1;
wq = vmalloc(size * sz); wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
if (!wq) { if (!wq) {
ret = -ENOMEM; ret = -ENOMEM;
goto bail; goto bail;
} }
spin_lock_irqsave(&srq->rq.lock, flags); /*
if (srq->rq.head < srq->rq.tail) * Return the address of the RWQ as the offset to mmap.
n = srq->rq.size + srq->rq.head - srq->rq.tail; * See ipath_mmap() for details.
*/
if (udata && udata->inlen >= sizeof(__u64)) {
__u64 offset_addr;
__u64 offset = (__u64) wq;
ret = ib_copy_from_udata(&offset_addr, udata,
sizeof(offset_addr));
if (ret) {
vfree(wq);
goto bail;
}
udata->outbuf = (void __user *) offset_addr;
ret = ib_copy_to_udata(udata, &offset,
sizeof(offset));
if (ret) {
vfree(wq);
goto bail;
}
}
spin_lock_irq(&srq->rq.lock);
/*
* validate head pointer value and compute
* the number of remaining WQEs.
*/
owq = srq->rq.wq;
head = owq->head;
if (head >= srq->rq.size)
head = 0;
tail = owq->tail;
if (tail >= srq->rq.size)
tail = 0;
n = head;
if (n < tail)
n += srq->rq.size - tail;
else else
n = srq->rq.head - srq->rq.tail; n -= tail;
if (size <= n || size <= srq->limit) { if (size <= n) {
spin_unlock_irqrestore(&srq->rq.lock, flags); spin_unlock_irq(&srq->rq.lock);
vfree(wq); vfree(wq);
ret = -EINVAL; ret = -EINVAL;
goto bail; goto bail;
} }
n = 0; n = 0;
p = wq; p = wq->wq;
while (srq->rq.tail != srq->rq.head) { while (tail != head) {
struct ipath_rwqe *wqe; struct ipath_rwqe *wqe;
int i; int i;
wqe = get_rwqe_ptr(&srq->rq, srq->rq.tail); wqe = get_rwqe_ptr(&srq->rq, tail);
p->wr_id = wqe->wr_id; p->wr_id = wqe->wr_id;
p->length = wqe->length;
p->num_sge = wqe->num_sge; p->num_sge = wqe->num_sge;
for (i = 0; i < wqe->num_sge; i++) for (i = 0; i < wqe->num_sge; i++)
p->sg_list[i] = wqe->sg_list[i]; p->sg_list[i] = wqe->sg_list[i];
n++; n++;
p = (struct ipath_rwqe *)((char *) p + sz); p = (struct ipath_rwqe *)((char *) p + sz);
if (++srq->rq.tail >= srq->rq.size) if (++tail >= srq->rq.size)
srq->rq.tail = 0; tail = 0;
} }
vfree(srq->rq.wq);
srq->rq.wq = wq; srq->rq.wq = wq;
srq->rq.size = size; srq->rq.size = size;
srq->rq.head = n; wq->head = n;
srq->rq.tail = 0; wq->tail = 0;
srq->rq.max_sge = attr->max_sge; if (attr_mask & IB_SRQ_LIMIT)
spin_unlock_irqrestore(&srq->rq.lock, flags); srq->limit = attr->srq_limit;
} spin_unlock_irq(&srq->rq.lock);
if (attr_mask & IB_SRQ_LIMIT) { vfree(owq);
spin_lock_irqsave(&srq->rq.lock, flags);
if (srq->ip) {
struct ipath_mmap_info *ip = srq->ip;
struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
ip->obj = wq;
ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
size * sz);
spin_lock_irq(&dev->pending_lock);
ip->next = dev->pending_mmaps;
dev->pending_mmaps = ip;
spin_unlock_irq(&dev->pending_lock);
}
} else if (attr_mask & IB_SRQ_LIMIT) {
spin_lock_irq(&srq->rq.lock);
if (attr->srq_limit >= srq->rq.size)
ret = -EINVAL;
else
srq->limit = attr->srq_limit; srq->limit = attr->srq_limit;
spin_unlock_irqrestore(&srq->rq.lock, flags); spin_unlock_irq(&srq->rq.lock);
} }
ret = 0;
bail: bail:
return ret; return ret;
......
...@@ -34,7 +34,54 @@ ...@@ -34,7 +34,54 @@
#include <rdma/ib_smi.h> #include <rdma/ib_smi.h>
#include "ipath_verbs.h" #include "ipath_verbs.h"
#include "ipath_common.h" #include "ipath_kernel.h"
static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
u32 *lengthp, struct ipath_sge_state *ss)
{
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
int user = to_ipd(qp->ibqp.pd)->user;
int i, j, ret;
struct ib_wc wc;
*lengthp = 0;
for (i = j = 0; i < wqe->num_sge; i++) {
if (wqe->sg_list[i].length == 0)
continue;
/* Check LKEY */
if ((user && wqe->sg_list[i].lkey == 0) ||
!ipath_lkey_ok(&dev->lk_table,
j ? &ss->sg_list[j - 1] : &ss->sge,
&wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
goto bad_lkey;
*lengthp += wqe->sg_list[i].length;
j++;
}
ss->num_sge = j;
ret = 1;
goto bail;
bad_lkey:
wc.wr_id = wqe->wr_id;
wc.status = IB_WC_LOC_PROT_ERR;
wc.opcode = IB_WC_RECV;
wc.vendor_err = 0;
wc.byte_len = 0;
wc.imm_data = 0;
wc.qp_num = qp->ibqp.qp_num;
wc.src_qp = 0;
wc.wc_flags = 0;
wc.pkey_index = 0;
wc.slid = 0;
wc.sl = 0;
wc.dlid_path_bits = 0;
wc.port_num = 0;
/* Signal solicited completion event. */
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
ret = 0;
bail:
return ret;
}
/** /**
* ipath_ud_loopback - handle send on loopback QPs * ipath_ud_loopback - handle send on loopback QPs
...@@ -46,6 +93,8 @@ ...@@ -46,6 +93,8 @@
* *
* This is called from ipath_post_ud_send() to forward a WQE addressed * This is called from ipath_post_ud_send() to forward a WQE addressed
* to the same HCA. * to the same HCA.
* Note that the receive interrupt handler may be calling ipath_ud_rcv()
* while this is being called.
*/ */
static void ipath_ud_loopback(struct ipath_qp *sqp, static void ipath_ud_loopback(struct ipath_qp *sqp,
struct ipath_sge_state *ss, struct ipath_sge_state *ss,
...@@ -60,7 +109,11 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, ...@@ -60,7 +109,11 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
struct ipath_srq *srq; struct ipath_srq *srq;
struct ipath_sge_state rsge; struct ipath_sge_state rsge;
struct ipath_sge *sge; struct ipath_sge *sge;
struct ipath_rwq *wq;
struct ipath_rwqe *wqe; struct ipath_rwqe *wqe;
void (*handler)(struct ib_event *, void *);
u32 tail;
u32 rlen;
qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn); qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn);
if (!qp) if (!qp)
...@@ -94,6 +147,13 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, ...@@ -94,6 +147,13 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
wc->imm_data = 0; wc->imm_data = 0;
} }
if (wr->num_sge > 1) {
rsge.sg_list = kmalloc((wr->num_sge - 1) *
sizeof(struct ipath_sge),
GFP_ATOMIC);
} else
rsge.sg_list = NULL;
/* /*
* Get the next work request entry to find where to put the data. * Get the next work request entry to find where to put the data.
* Note that it is safe to drop the lock after changing rq->tail * Note that it is safe to drop the lock after changing rq->tail
...@@ -101,37 +161,52 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, ...@@ -101,37 +161,52 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
*/ */
if (qp->ibqp.srq) { if (qp->ibqp.srq) {
srq = to_isrq(qp->ibqp.srq); srq = to_isrq(qp->ibqp.srq);
handler = srq->ibsrq.event_handler;
rq = &srq->rq; rq = &srq->rq;
} else { } else {
srq = NULL; srq = NULL;
handler = NULL;
rq = &qp->r_rq; rq = &qp->r_rq;
} }
spin_lock_irqsave(&rq->lock, flags); spin_lock_irqsave(&rq->lock, flags);
if (rq->tail == rq->head) { wq = rq->wq;
tail = wq->tail;
while (1) {
if (unlikely(tail == wq->head)) {
spin_unlock_irqrestore(&rq->lock, flags); spin_unlock_irqrestore(&rq->lock, flags);
dev->n_pkt_drops++; dev->n_pkt_drops++;
goto done; goto bail_sge;
}
wqe = get_rwqe_ptr(rq, tail);
if (++tail >= rq->size)
tail = 0;
if (init_sge(qp, wqe, &rlen, &rsge))
break;
wq->tail = tail;
} }
/* Silently drop packets which are too big. */ /* Silently drop packets which are too big. */
wqe = get_rwqe_ptr(rq, rq->tail); if (wc->byte_len > rlen) {
if (wc->byte_len > wqe->length) {
spin_unlock_irqrestore(&rq->lock, flags); spin_unlock_irqrestore(&rq->lock, flags);
dev->n_pkt_drops++; dev->n_pkt_drops++;
goto done; goto bail_sge;
} }
wq->tail = tail;
wc->wr_id = wqe->wr_id; wc->wr_id = wqe->wr_id;
rsge.sge = wqe->sg_list[0]; if (handler) {
rsge.sg_list = wqe->sg_list + 1;
rsge.num_sge = wqe->num_sge;
if (++rq->tail >= rq->size)
rq->tail = 0;
if (srq && srq->ibsrq.event_handler) {
u32 n; u32 n;
if (rq->head < rq->tail) /*
n = rq->size + rq->head - rq->tail; * validate head pointer value and compute
* the number of remaining WQEs.
*/
n = wq->head;
if (n >= rq->size)
n = 0;
if (n < tail)
n += rq->size - tail;
else else
n = rq->head - rq->tail; n -= tail;
if (n < srq->limit) { if (n < srq->limit) {
struct ib_event ev; struct ib_event ev;
...@@ -140,12 +215,12 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, ...@@ -140,12 +215,12 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
ev.device = qp->ibqp.device; ev.device = qp->ibqp.device;
ev.element.srq = qp->ibqp.srq; ev.element.srq = qp->ibqp.srq;
ev.event = IB_EVENT_SRQ_LIMIT_REACHED; ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
srq->ibsrq.event_handler(&ev, handler(&ev, srq->ibsrq.srq_context);
srq->ibsrq.srq_context);
} else } else
spin_unlock_irqrestore(&rq->lock, flags); spin_unlock_irqrestore(&rq->lock, flags);
} else } else
spin_unlock_irqrestore(&rq->lock, flags); spin_unlock_irqrestore(&rq->lock, flags);
ah_attr = &to_iah(wr->wr.ud.ah)->attr; ah_attr = &to_iah(wr->wr.ud.ah)->attr;
if (ah_attr->ah_flags & IB_AH_GRH) { if (ah_attr->ah_flags & IB_AH_GRH) {
ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh)); ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
...@@ -186,7 +261,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, ...@@ -186,7 +261,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
wc->src_qp = sqp->ibqp.qp_num; wc->src_qp = sqp->ibqp.qp_num;
/* XXX do we know which pkey matched? Only needed for GSI. */ /* XXX do we know which pkey matched? Only needed for GSI. */
wc->pkey_index = 0; wc->pkey_index = 0;
wc->slid = ipath_layer_get_lid(dev->dd) | wc->slid = dev->dd->ipath_lid |
(ah_attr->src_path_bits & (ah_attr->src_path_bits &
((1 << (dev->mkeyprot_resv_lmc & 7)) - 1)); ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1));
wc->sl = ah_attr->sl; wc->sl = ah_attr->sl;
...@@ -196,6 +271,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, ...@@ -196,6 +271,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc, ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
wr->send_flags & IB_SEND_SOLICITED); wr->send_flags & IB_SEND_SOLICITED);
bail_sge:
kfree(rsge.sg_list);
done: done:
if (atomic_dec_and_test(&qp->refcount)) if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait); wake_up(&qp->wait);
...@@ -433,13 +510,9 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, ...@@ -433,13 +510,9 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
int opcode; int opcode;
u32 hdrsize; u32 hdrsize;
u32 pad; u32 pad;
unsigned long flags;
struct ib_wc wc; struct ib_wc wc;
u32 qkey; u32 qkey;
u32 src_qp; u32 src_qp;
struct ipath_rq *rq;
struct ipath_srq *srq;
struct ipath_rwqe *wqe;
u16 dlid; u16 dlid;
int header_in_data; int header_in_data;
...@@ -547,19 +620,10 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, ...@@ -547,19 +620,10 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
/* /*
* Get the next work request entry to find where to put the data. * Get the next work request entry to find where to put the data.
* Note that it is safe to drop the lock after changing rq->tail
* since ipath_post_receive() won't fill the empty slot.
*/ */
if (qp->ibqp.srq) { if (qp->r_reuse_sge)
srq = to_isrq(qp->ibqp.srq); qp->r_reuse_sge = 0;
rq = &srq->rq; else if (!ipath_get_rwqe(qp, 0)) {
} else {
srq = NULL;
rq = &qp->r_rq;
}
spin_lock_irqsave(&rq->lock, flags);
if (rq->tail == rq->head) {
spin_unlock_irqrestore(&rq->lock, flags);
/* /*
* Count VL15 packets dropped due to no receive buffer. * Count VL15 packets dropped due to no receive buffer.
* Otherwise, count them as buffer overruns since usually, * Otherwise, count them as buffer overruns since usually,
...@@ -573,39 +637,11 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, ...@@ -573,39 +637,11 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
goto bail; goto bail;
} }
/* Silently drop packets which are too big. */ /* Silently drop packets which are too big. */
wqe = get_rwqe_ptr(rq, rq->tail); if (wc.byte_len > qp->r_len) {
if (wc.byte_len > wqe->length) { qp->r_reuse_sge = 1;
spin_unlock_irqrestore(&rq->lock, flags);
dev->n_pkt_drops++; dev->n_pkt_drops++;
goto bail; goto bail;
} }
wc.wr_id = wqe->wr_id;
qp->r_sge.sge = wqe->sg_list[0];
qp->r_sge.sg_list = wqe->sg_list + 1;
qp->r_sge.num_sge = wqe->num_sge;
if (++rq->tail >= rq->size)
rq->tail = 0;
if (srq && srq->ibsrq.event_handler) {
u32 n;
if (rq->head < rq->tail)
n = rq->size + rq->head - rq->tail;
else
n = rq->head - rq->tail;
if (n < srq->limit) {
struct ib_event ev;
srq->limit = 0;
spin_unlock_irqrestore(&rq->lock, flags);
ev.device = qp->ibqp.device;
ev.element.srq = qp->ibqp.srq;
ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
srq->ibsrq.event_handler(&ev,
srq->ibsrq.srq_context);
} else
spin_unlock_irqrestore(&rq->lock, flags);
} else
spin_unlock_irqrestore(&rq->lock, flags);
if (has_grh) { if (has_grh) {
ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh, ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
sizeof(struct ib_grh)); sizeof(struct ib_grh));
...@@ -614,6 +650,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, ...@@ -614,6 +650,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh)); ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
ipath_copy_sge(&qp->r_sge, data, ipath_copy_sge(&qp->r_sge, data,
wc.byte_len - sizeof(struct ib_grh)); wc.byte_len - sizeof(struct ib_grh));
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS; wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV; wc.opcode = IB_WC_RECV;
wc.vendor_err = 0; wc.vendor_err = 0;
......
...@@ -277,11 +277,12 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -277,11 +277,12 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr) struct ib_recv_wr **bad_wr)
{ {
struct ipath_qp *qp = to_iqp(ibqp); struct ipath_qp *qp = to_iqp(ibqp);
struct ipath_rwq *wq = qp->r_rq.wq;
unsigned long flags; unsigned long flags;
int ret; int ret;
/* Check that state is OK to post receive. */ /* Check that state is OK to post receive. */
if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK)) { if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
*bad_wr = wr; *bad_wr = wr;
ret = -EINVAL; ret = -EINVAL;
goto bail; goto bail;
...@@ -290,59 +291,31 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -290,59 +291,31 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
for (; wr; wr = wr->next) { for (; wr; wr = wr->next) {
struct ipath_rwqe *wqe; struct ipath_rwqe *wqe;
u32 next; u32 next;
int i, j; int i;
if (wr->num_sge > qp->r_rq.max_sge) { if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
*bad_wr = wr; *bad_wr = wr;
ret = -ENOMEM; ret = -ENOMEM;
goto bail; goto bail;
} }
spin_lock_irqsave(&qp->r_rq.lock, flags); spin_lock_irqsave(&qp->r_rq.lock, flags);
next = qp->r_rq.head + 1; next = wq->head + 1;
if (next >= qp->r_rq.size) if (next >= qp->r_rq.size)
next = 0; next = 0;
if (next == qp->r_rq.tail) { if (next == wq->tail) {
spin_unlock_irqrestore(&qp->r_rq.lock, flags); spin_unlock_irqrestore(&qp->r_rq.lock, flags);
*bad_wr = wr; *bad_wr = wr;
ret = -ENOMEM; ret = -ENOMEM;
goto bail; goto bail;
} }
wqe = get_rwqe_ptr(&qp->r_rq, qp->r_rq.head); wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
wqe->wr_id = wr->wr_id; wqe->wr_id = wr->wr_id;
wqe->sg_list[0].mr = NULL; wqe->num_sge = wr->num_sge;
wqe->sg_list[0].vaddr = NULL; for (i = 0; i < wr->num_sge; i++)
wqe->sg_list[0].length = 0; wqe->sg_list[i] = wr->sg_list[i];
wqe->sg_list[0].sge_length = 0; wq->head = next;
wqe->length = 0;
for (i = 0, j = 0; i < wr->num_sge; i++) {
/* Check LKEY */
if (to_ipd(qp->ibqp.pd)->user &&
wr->sg_list[i].lkey == 0) {
spin_unlock_irqrestore(&qp->r_rq.lock,
flags);
*bad_wr = wr;
ret = -EINVAL;
goto bail;
}
if (wr->sg_list[i].length == 0)
continue;
if (!ipath_lkey_ok(
&to_idev(qp->ibqp.device)->lk_table,
&wqe->sg_list[j], &wr->sg_list[i],
IB_ACCESS_LOCAL_WRITE)) {
spin_unlock_irqrestore(&qp->r_rq.lock,
flags);
*bad_wr = wr;
ret = -EINVAL;
goto bail;
}
wqe->length += wr->sg_list[i].length;
j++;
}
wqe->num_sge = j;
qp->r_rq.head = next;
spin_unlock_irqrestore(&qp->r_rq.lock, flags); spin_unlock_irqrestore(&qp->r_rq.lock, flags);
} }
ret = 0; ret = 0;
...@@ -1137,6 +1110,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) ...@@ -1137,6 +1110,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
dev->attach_mcast = ipath_multicast_attach; dev->attach_mcast = ipath_multicast_attach;
dev->detach_mcast = ipath_multicast_detach; dev->detach_mcast = ipath_multicast_detach;
dev->process_mad = ipath_process_mad; dev->process_mad = ipath_process_mad;
dev->mmap = ipath_mmap;
snprintf(dev->node_desc, sizeof(dev->node_desc), snprintf(dev->node_desc, sizeof(dev->node_desc),
IPATH_IDSTR " %s kernel_SMA", system_utsname.nodename); IPATH_IDSTR " %s kernel_SMA", system_utsname.nodename);
......
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/kref.h>
#include <rdma/ib_pack.h> #include <rdma/ib_pack.h>
#include "ipath_layer.h" #include "ipath_layer.h"
...@@ -50,7 +51,7 @@ ...@@ -50,7 +51,7 @@
* Increment this value if any changes that break userspace ABI * Increment this value if any changes that break userspace ABI
* compatibility are made. * compatibility are made.
*/ */
#define IPATH_UVERBS_ABI_VERSION 1 #define IPATH_UVERBS_ABI_VERSION 2
/* /*
* Define an ib_cq_notify value that is not valid so we know when CQ * Define an ib_cq_notify value that is not valid so we know when CQ
...@@ -178,58 +179,41 @@ struct ipath_ah { ...@@ -178,58 +179,41 @@ struct ipath_ah {
}; };
/* /*
* Quick description of our CQ/QP locking scheme: * This structure is used by ipath_mmap() to validate an offset
* * when an mmap() request is made. The vm_area_struct then uses
* We have one global lock that protects dev->cq/qp_table. Each * this as its vm_private_data.
* struct ipath_cq/qp also has its own lock. An individual qp lock
* may be taken inside of an individual cq lock. Both cqs attached to
* a qp may be locked, with the send cq locked first. No other
* nesting should be done.
*
* Each struct ipath_cq/qp also has an atomic_t ref count. The
* pointer from the cq/qp_table to the struct counts as one reference.
* This reference also is good for access through the consumer API, so
* modifying the CQ/QP etc doesn't need to take another reference.
* Access because of a completion being polled does need a reference.
*
* Finally, each struct ipath_cq/qp has a wait_queue_head_t for the
* destroy function to sleep on.
*
* This means that access from the consumer API requires nothing but
* taking the struct's lock.
*
* Access because of a completion event should go as follows:
* - lock cq/qp_table and look up struct
* - increment ref count in struct
* - drop cq/qp_table lock
* - lock struct, do your thing, and unlock struct
* - decrement ref count; if zero, wake up waiters
*
* To destroy a CQ/QP, we can do the following:
* - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
* - decrement ref count
* - wait_event until ref count is zero
*
* It is the consumer's responsibilty to make sure that no QP
* operations (WQE posting or state modification) are pending when the
* QP is destroyed. Also, the consumer must make sure that calls to
* qp_modify are serialized.
*
* Possible optimizations (wait for profile data to see if/where we
* have locks bouncing between CPUs):
* - split cq/qp table lock into n separate (cache-aligned) locks,
* indexed (say) by the page in the table
*/ */
struct ipath_mmap_info {
struct ipath_mmap_info *next;
struct ib_ucontext *context;
void *obj;
struct kref ref;
unsigned size;
unsigned mmap_cnt;
};
/*
* This structure is used to contain the head pointer, tail pointer,
* and completion queue entries as a single memory allocation so
* it can be mmap'ed into user space.
*/
struct ipath_cq_wc {
u32 head; /* index of next entry to fill */
u32 tail; /* index of next ib_poll_cq() entry */
struct ib_wc queue[1]; /* this is actually size ibcq.cqe + 1 */
};
/*
* The completion queue structure.
*/
struct ipath_cq { struct ipath_cq {
struct ib_cq ibcq; struct ib_cq ibcq;
struct tasklet_struct comptask; struct tasklet_struct comptask;
spinlock_t lock; spinlock_t lock;
u8 notify; u8 notify;
u8 triggered; u8 triggered;
u32 head; /* new records added to the head */ struct ipath_cq_wc *queue;
u32 tail; /* poll_cq() reads from here. */ struct ipath_mmap_info *ip;
struct ib_wc *queue; /* this is actually ibcq.cqe + 1 */
}; };
/* /*
...@@ -248,28 +232,40 @@ struct ipath_swqe { ...@@ -248,28 +232,40 @@ struct ipath_swqe {
/* /*
* Receive work request queue entry. * Receive work request queue entry.
* The size of the sg_list is determined when the QP is created and stored * The size of the sg_list is determined when the QP (or SRQ) is created
* in qp->r_max_sge. * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
*/ */
struct ipath_rwqe { struct ipath_rwqe {
u64 wr_id; u64 wr_id;
u32 length; /* total length of data in sg_list */
u8 num_sge; u8 num_sge;
struct ipath_sge sg_list[0]; struct ib_sge sg_list[0];
}; };
struct ipath_rq { /*
spinlock_t lock; * This structure is used to contain the head pointer, tail pointer,
* and receive work queue entries as a single memory allocation so
* it can be mmap'ed into user space.
* Note that the wq array elements are variable size so you can't
* just index into the array to get the N'th element;
* use get_rwqe_ptr() instead.
*/
struct ipath_rwq {
u32 head; /* new work requests posted to the head */ u32 head; /* new work requests posted to the head */
u32 tail; /* receives pull requests from here. */ u32 tail; /* receives pull requests from here. */
struct ipath_rwqe wq[0];
};
struct ipath_rq {
struct ipath_rwq *wq;
spinlock_t lock;
u32 size; /* size of RWQE array */ u32 size; /* size of RWQE array */
u8 max_sge; u8 max_sge;
struct ipath_rwqe *wq; /* RWQE array */
}; };
struct ipath_srq { struct ipath_srq {
struct ib_srq ibsrq; struct ib_srq ibsrq;
struct ipath_rq rq; struct ipath_rq rq;
struct ipath_mmap_info *ip;
/* send signal when number of RWQEs < limit */ /* send signal when number of RWQEs < limit */
u32 limit; u32 limit;
}; };
...@@ -293,6 +289,7 @@ struct ipath_qp { ...@@ -293,6 +289,7 @@ struct ipath_qp {
atomic_t refcount; atomic_t refcount;
wait_queue_head_t wait; wait_queue_head_t wait;
struct tasklet_struct s_task; struct tasklet_struct s_task;
struct ipath_mmap_info *ip;
struct ipath_sge_state *s_cur_sge; struct ipath_sge_state *s_cur_sge;
struct ipath_sge_state s_sge; /* current send request data */ struct ipath_sge_state s_sge; /* current send request data */
/* current RDMA read send data */ /* current RDMA read send data */
...@@ -346,6 +343,7 @@ struct ipath_qp { ...@@ -346,6 +343,7 @@ struct ipath_qp {
u32 s_lsn; /* limit sequence number (credit) */ u32 s_lsn; /* limit sequence number (credit) */
struct ipath_swqe *s_wq; /* send work queue */ struct ipath_swqe *s_wq; /* send work queue */
struct ipath_rq r_rq; /* receive work queue */ struct ipath_rq r_rq; /* receive work queue */
struct ipath_sge r_sg_list[0]; /* verified SGEs */
}; };
/* /*
...@@ -369,15 +367,15 @@ static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp, ...@@ -369,15 +367,15 @@ static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
/* /*
* Since struct ipath_rwqe is not a fixed size, we can't simply index into * Since struct ipath_rwqe is not a fixed size, we can't simply index into
* struct ipath_rq.wq. This function does the array index computation. * struct ipath_rwq.wq. This function does the array index computation.
*/ */
static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq, static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
unsigned n) unsigned n)
{ {
return (struct ipath_rwqe *) return (struct ipath_rwqe *)
((char *) rq->wq + ((char *) rq->wq->wq +
(sizeof(struct ipath_rwqe) + (sizeof(struct ipath_rwqe) +
rq->max_sge * sizeof(struct ipath_sge)) * n); rq->max_sge * sizeof(struct ib_sge)) * n);
} }
/* /*
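
Because each RWQE carries a variable number of struct ib_sge entries, the slot stride is not sizeof(struct ipath_rwqe); get_rwqe_ptr() above computes it per queue. A quick worked example; the concrete sizes assume a typical 64-bit build and exact values depend on architecture padding.

```c
#include <stddef.h>

/*
 * Stride of one variable-size RWQE slot, as computed by get_rwqe_ptr():
 * the fixed header plus max_sge scatter/gather entries.  On a typical
 * 64-bit build sizeof(struct ipath_rwqe) is 16 and sizeof(struct ib_sge)
 * is 16, so a queue created with max_sge == 3 has a 64-byte stride and
 * slot n starts at byte offset 64 * n past rq->wq->wq.
 */
static size_t rwqe_stride(size_t rwqe_hdr_size, size_t sge_size,
			  unsigned max_sge)
{
	return rwqe_hdr_size + (size_t) max_sge * sge_size;
}
```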
...@@ -417,6 +415,7 @@ struct ipath_ibdev { ...@@ -417,6 +415,7 @@ struct ipath_ibdev {
struct ib_device ibdev; struct ib_device ibdev;
struct list_head dev_list; struct list_head dev_list;
struct ipath_devdata *dd; struct ipath_devdata *dd;
struct ipath_mmap_info *pending_mmaps;
int ib_unit; /* This is the device number */ int ib_unit; /* This is the device number */
u16 sm_lid; /* in host order */ u16 sm_lid; /* in host order */
u8 sm_sl; u8 sm_sl;
...@@ -681,6 +680,10 @@ int ipath_unmap_fmr(struct list_head *fmr_list); ...@@ -681,6 +680,10 @@ int ipath_unmap_fmr(struct list_head *fmr_list);
int ipath_dealloc_fmr(struct ib_fmr *ibfmr); int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
void ipath_release_mmap_info(struct kref *ref);
int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev); void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);
void ipath_insert_rnr_queue(struct ipath_qp *qp); void ipath_insert_rnr_queue(struct ipath_qp *qp);
......