Commit cf504632 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  mlx4_core: Don't double-free IRQs when falling back from MSI-X to INTx
  IB/mthca: Don't double-free IRQs when falling back from MSI-X to INTx
  IB/mlx4: Add strong ordering to local inval and fast reg work requests
  IB/ehca: Remove superfluous bitmasks from QP control block
  RDMA/cxgb3: Limit fast register size based on T3 limitations
  RDMA/cxgb3: Report correct port state and MTU
  mlx4_core: Add module parameter for number of MTTs per segment
  IB/mthca: Add module parameter for number of MTTs per segment
  RDMA/nes: Fix off-by-one bugs in reset_adapter_ne020() and init_serdes()
  infiniband: Remove void casts
  IB/ehca: Increment version number
  IB/ehca: Remove unnecessary memory operations for userspace queue pairs
  IB/ehca: Fall back to vmalloc() for big allocations
  IB/ehca: Replace vmalloc() with kmalloc() for queue allocation
parents ae937deb 8d34ff34
@@ -133,7 +133,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
 	struct c2_qp *qp;
 	int is_recv = 0;
-	ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+	ce = c2_mq_consume(&cq->mq);
 	if (!ce) {
 		return -EAGAIN;
 	}
@@ -146,7 +146,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
 	while ((qp =
 		(struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
 		c2_mq_free(&cq->mq);
-		ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+		ce = c2_mq_consume(&cq->mq);
 		if (!ce)
 			return -EAGAIN;
 	}
...
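Note on the hunk above (part of the "infiniband: Remove void casts" change): c2_mq_consume() returns void *, and C converts void * to any object pointer type implicitly, so the casts were pure noise. A minimal standalone sketch of the idiom, with hypothetical names (queue_consume, struct event):

#include <stdlib.h>

struct event { int id; };

/* Stand-in for a consume routine like c2_mq_consume(): returns void *. */
static void *queue_consume(void)
{
	return malloc(sizeof(struct event));
}

int main(void)
{
	/* No cast needed: void * converts implicitly to struct event *.
	 * (C++ would require the cast, which is how such casts often
	 * creep into C code in the first place.) */
	struct event *ev = queue_consume();
	int ok = ev != NULL;

	free(ev);
	return ok ? 0 : 1;
}
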
@@ -176,7 +176,7 @@ struct t3_send_wr {
 	struct t3_sge sgl[T3_MAX_SGE];	/* 4+ */
 };
-#define T3_MAX_FASTREG_DEPTH 24
+#define T3_MAX_FASTREG_DEPTH 10
 #define T3_MAX_FASTREG_FRAG 10
 struct t3_fastreg_wr {
...
@@ -40,6 +40,7 @@
 #include <linux/spinlock.h>
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
+#include <linux/inetdevice.h>
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -1152,12 +1153,39 @@ static int iwch_query_device(struct ib_device *ibdev,
 static int iwch_query_port(struct ib_device *ibdev,
 			   u8 port, struct ib_port_attr *props)
 {
+	struct iwch_dev *dev;
+	struct net_device *netdev;
+	struct in_device *inetdev;
 	PDBG("%s ibdev %p\n", __func__, ibdev);
+	dev = to_iwch_dev(ibdev);
+	netdev = dev->rdev.port_info.lldevs[port-1];
 	memset(props, 0, sizeof(struct ib_port_attr));
 	props->max_mtu = IB_MTU_4096;
+	if (netdev->mtu >= 4096)
+		props->active_mtu = IB_MTU_4096;
+	else if (netdev->mtu >= 2048)
 		props->active_mtu = IB_MTU_2048;
+	else if (netdev->mtu >= 1024)
+		props->active_mtu = IB_MTU_1024;
+	else if (netdev->mtu >= 512)
+		props->active_mtu = IB_MTU_512;
+	else
+		props->active_mtu = IB_MTU_256;
+	if (!netif_carrier_ok(netdev))
+		props->state = IB_PORT_DOWN;
+	else {
+		inetdev = in_dev_get(netdev);
+		if (inetdev->ifa_list)
 			props->state = IB_PORT_ACTIVE;
+		else
+			props->state = IB_PORT_INIT;
+		in_dev_put(inetdev);
+	}
 	props->port_cap_flags =
 		IB_PORT_CM_SUP |
 		IB_PORT_SNMP_TUNNEL_SUP |
...
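The active_mtu logic above simply floors the netdev MTU to the largest IB MTU enum that fits. The same mapping as a self-contained sketch (mtu_to_ib_mtu is an illustrative name; the enum values match those in ib_verbs.h):

#include <stdio.h>

/* IB MTU enum values as defined in the RDMA stack */
enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5,
};

/* Map a netdev MTU in bytes to the largest IB MTU that fits. */
static enum ib_mtu mtu_to_ib_mtu(unsigned int mtu)
{
	if (mtu >= 4096) return IB_MTU_4096;
	if (mtu >= 2048) return IB_MTU_2048;
	if (mtu >= 1024) return IB_MTU_1024;
	if (mtu >= 512)  return IB_MTU_512;
	return IB_MTU_256;
}

int main(void)
{
	/* A standard 1500-byte Ethernet MTU maps to IB_MTU_1024 (3). */
	printf("%d\n", mtu_to_ib_mtu(1500));
	return 0;
}
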
@@ -165,7 +165,6 @@ struct hcp_modify_qp_control_block {
 #define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
 #define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
 #define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
-#define MQPCB_QP_STATE EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
 #define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
 #define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
@@ -176,60 +175,33 @@ struct hcp_modify_qp_control_block {
 #define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
 #define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
 #define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
-#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
-#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
-#define MQPCB_DLID EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
-#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
-#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25, 31)
 #define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
-#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
-#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
-#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
-#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12, 31)
 #define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
 #define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
-#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28, 31)
 #define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
-#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31, 31)
 #define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
-#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
-#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27, 31)
 #define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
-#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
-#define MQPCB_DLID_AL EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
-#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
-#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25, 31)
 #define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
-#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
-#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
-#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
-#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12, 31)
 #define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
 #define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
-#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
-#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
-#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31, 31)
-#define MQPCB_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
 #define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
-#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31, 31)
 #define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
-#define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
 #define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
...
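For readers unfamiliar with the EHCA_BMASK_IBM(from, to) notation: it names a bit range in IBM (big-endian) numbering, where bit 0 is the most significant bit. Every value mask deleted above ends at bit 31, the least significant bit of its 32-bit word, so extracting the range is a shift by zero plus a mask of bits the firmware never sets; the plain field value can be used directly, which is why these defines were superfluous. A rough sketch of the numbering on a 32-bit word (ibm_bits_get32 is an illustrative helper, not the kernel macro):

#include <stdint.h>
#include <stdio.h>

/*
 * IBM bit numbering on a 32-bit word: bit 0 is the MSB, bit 31 the LSB
 * (an assumption for illustration; the kernel's EHCA_BMASK_* macros
 * implement the same idea with their own encoding).
 */
static uint32_t ibm_bits_get32(uint32_t word, int from, int to)
{
	int width = to - from + 1;
	int shift = 31 - to;
	uint32_t mask = (width == 32) ? ~0U : ((1U << width) - 1);

	return (word >> shift) & mask;
}

int main(void)
{
	/*
	 * A field in IBM bits 24..31, like the removed MQPCB_PATH_MTU,
	 * is right-aligned: shift is 0 and the mask is a no-op on sane
	 * input, so reading the field directly gives the same result.
	 */
	uint32_t path_mtu_field = 5;
	printf("%u\n", ibm_bits_get32(path_mtu_field, 24, 31)); /* prints 5 */
	return 0;
}
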
@@ -479,13 +479,13 @@ void ehca_tasklet_neq(unsigned long data)
 	struct ehca_eqe *eqe;
 	u64 ret;
-	eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+	eqe = ehca_poll_eq(shca, &shca->neq);
 	while (eqe) {
 		if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
 			parse_ec(shca, eqe->entry);
-		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+		eqe = ehca_poll_eq(shca, &shca->neq);
 	}
 	ret = hipz_h_reset_event(shca->ipz_hca_handle,
@@ -572,8 +572,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 	eqe_cnt = 0;
 	do {
 		u32 token;
-		eqe_cache[eqe_cnt].eqe =
-			(struct ehca_eqe *)ehca_poll_eq(shca, eq);
+		eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
 		if (!eqe_cache[eqe_cnt].eqe)
 			break;
 		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
@@ -637,7 +636,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 		goto unlock_irq_spinlock;
 	do {
 		struct ehca_eqe *eqe;
-		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+		eqe = ehca_poll_eq(shca, &shca->eq);
 		if (!eqe)
 			break;
 		process_eqe(shca, eqe);
...
@@ -52,7 +52,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"
-#define HCAD_VERSION "0026"
+#define HCAD_VERSION "0027"
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
...
@@ -461,7 +461,7 @@ static struct ehca_qp *internal_create_qp(
 		ib_device);
 	struct ib_ucontext *context = NULL;
 	u64 h_ret;
-	int is_llqp = 0, has_srq = 0;
+	int is_llqp = 0, has_srq = 0, is_user = 0;
 	int qp_type, max_send_sge, max_recv_sge, ret;
 	/* h_call's out parameters */
@@ -609,9 +609,6 @@ static struct ehca_qp *internal_create_qp(
 		}
 	}
-	if (pd->uobject && udata)
-		context = pd->uobject->context;
 	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
 	if (!my_qp) {
 		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
@@ -619,6 +616,11 @@ static struct ehca_qp *internal_create_qp(
 		return ERR_PTR(-ENOMEM);
 	}
+	if (pd->uobject && udata) {
+		is_user = 1;
+		context = pd->uobject->context;
+	}
 	atomic_set(&my_qp->nr_events, 0);
 	init_waitqueue_head(&my_qp->wait_completion);
 	spin_lock_init(&my_qp->spinlock_s);
@@ -707,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
 			(parms.squeue.is_small || parms.rqueue.is_small);
 	}
-	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
+	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
 	if (h_ret != H_SUCCESS) {
 		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
 			 h_ret);
@@ -769,6 +771,7 @@ static struct ehca_qp *internal_create_qp(
 			goto create_qp_exit2;
 		}
+		if (!is_user) {
 			my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
 				my_qp->ipz_squeue.qe_size;
 			my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
@@ -782,6 +785,7 @@ static struct ehca_qp *internal_create_qp(
 			/* to avoid the generation of bogus flush CQEs */
 			reset_queue_map(&my_qp->sq_map);
 		}
+		}
 	if (HAS_RQ(my_qp)) {
 		ret = init_qp_queue(
@@ -792,7 +796,7 @@ static struct ehca_qp *internal_create_qp(
 				 "and pages ret=%i", ret);
 			goto create_qp_exit4;
 		}
+		if (!is_user) {
 			my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
 				my_qp->ipz_rqueue.qe_size;
 			my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
@@ -805,7 +809,8 @@ static struct ehca_qp *internal_create_qp(
 			INIT_LIST_HEAD(&my_qp->rq_err_node);
 			/* to avoid the generation of bogus flush CQEs */
 			reset_queue_map(&my_qp->rq_map);
-	} else if (init_attr->srq) {
+		}
+	} else if (init_attr->srq && !is_user) {
 		/* this is a base QP, use the queue map of the SRQ */
 		my_qp->rq_map = my_srq->rq_map;
 		INIT_LIST_HEAD(&my_qp->rq_err_node);
@@ -918,7 +923,7 @@ create_qp_exit7:
 	kfree(my_qp->mod_qp_parm);
 create_qp_exit6:
-	if (HAS_RQ(my_qp))
+	if (HAS_RQ(my_qp) && !is_user)
 		vfree(my_qp->rq_map.map);
 create_qp_exit5:
@@ -926,7 +931,7 @@ create_qp_exit5:
 	ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
 create_qp_exit4:
-	if (HAS_SQ(my_qp))
+	if (HAS_SQ(my_qp) && !is_user)
 		vfree(my_qp->sq_map.map);
 create_qp_exit3:
@@ -1244,6 +1249,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	u64 update_mask;
 	u64 h_ret;
 	int bad_wqe_cnt = 0;
+	int is_user = 0;
 	int squeue_locked = 0;
 	unsigned long flags = 0;
@@ -1266,6 +1272,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		ret = ehca2ib_return_code(h_ret);
 		goto modify_qp_exit1;
 	}
+	if (ibqp->uobject)
+		is_user = 1;
 	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
@@ -1728,7 +1736,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 				goto modify_qp_exit2;
 			}
 		}
-	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
+	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
+	    && !is_user) {
 		ret = check_for_left_cqes(my_qp, shca);
 		if (ret)
 			goto modify_qp_exit2;
@@ -1738,16 +1747,17 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		ipz_qeit_reset(&my_qp->ipz_rqueue);
 		ipz_qeit_reset(&my_qp->ipz_squeue);
-		if (qp_cur_state == IB_QPS_ERR) {
+		if (qp_cur_state == IB_QPS_ERR && !is_user) {
 			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 			if (HAS_RQ(my_qp))
 				del_from_err_list(my_qp->recv_cq,
 						  &my_qp->rq_err_node);
 		}
+		if (!is_user)
 			reset_queue_map(&my_qp->sq_map);
-		if (HAS_RQ(my_qp))
+		if (HAS_RQ(my_qp) && !is_user)
 			reset_queue_map(&my_qp->rq_map);
 	}
@@ -1952,19 +1962,13 @@ int ehca_query_qp(struct ib_qp *qp,
 	qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
 	qp_attr->dest_qp_num = qpcb->dest_qp_nr;
-	qp_attr->pkey_index =
-		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);
-	qp_attr->port_num =
-		EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);
+	qp_attr->pkey_index = qpcb->prim_p_key_idx;
+	qp_attr->port_num = qpcb->prim_phys_port;
 	qp_attr->timeout = qpcb->timeout;
 	qp_attr->retry_cnt = qpcb->retry_count;
 	qp_attr->rnr_retry = qpcb->rnr_retry_count;
-	qp_attr->alt_pkey_index =
-		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);
+	qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
 	qp_attr->alt_port_num = qpcb->alt_phys_port;
 	qp_attr->alt_timeout = qpcb->timeout_al;
@@ -2051,8 +2055,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		update_mask |=
 			EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
 			| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
-		mqpcb->curr_srq_limit =
-			EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit);
+		mqpcb->curr_srq_limit = attr->srq_limit;
 		mqpcb->qp_aff_asyn_ev_log_reg =
 			EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
 	}
@@ -2115,8 +2118,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
 	srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
 	srq_attr->max_sge = 3;
-	srq_attr->srq_limit = EHCA_BMASK_GET(
-		MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
+	srq_attr->srq_limit = qpcb->curr_srq_limit;
 	if (ehca_debug_level >= 2)
 		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
@@ -2138,10 +2140,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	int ret;
 	u64 h_ret;
 	u8 port_num;
+	int is_user = 0;
 	enum ib_qp_type qp_type;
 	unsigned long flags;
 	if (uobject) {
+		is_user = 1;
 		if (my_qp->mm_count_galpa ||
 		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
 			ehca_err(dev, "Resources still referenced in "
@@ -2168,10 +2172,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	 * SRQs will never get into an error list and do not have a recv_cq,
 	 * so we need to skip them here.
 	 */
-	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
+	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
 		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
-	if (HAS_SQ(my_qp))
+	if (HAS_SQ(my_qp) && !is_user)
 		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 	/* now wait until all pending events have completed */
@@ -2209,12 +2213,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	if (HAS_RQ(my_qp)) {
 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
+		if (!is_user)
 			vfree(my_qp->rq_map.map);
 	}
 	if (HAS_SQ(my_qp)) {
 		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
+		if (!is_user)
 			vfree(my_qp->sq_map.map);
 	}
 	kmem_cache_free(qp_cache, my_qp);
...
@@ -284,7 +284,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 	param->act_pages = (u32)outs[4];
 	if (ret == H_SUCCESS)
-		hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
+		hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
 	if (ret == H_NOT_ENOUGH_RESOURCES)
 		ehca_gen_err("Not enough resources. ret=%lli", ret);
@@ -293,7 +293,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 }
 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-			     struct ehca_alloc_qp_parms *parms)
+			     struct ehca_alloc_qp_parms *parms, int is_user)
 {
 	u64 ret;
 	u64 allocate_controls, max_r10_reg, r11, r12;
@@ -359,7 +359,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
 		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
 	if (ret == H_SUCCESS)
-		hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
+		hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
 	if (ret == H_NOT_ENOUGH_RESOURCES)
 		ehca_gen_err("Not enough resources. ret=%lli", ret);
...
@@ -78,7 +78,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
  * initialize resources, create empty QPPTs (2 rings).
  */
 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-			     struct ehca_alloc_qp_parms *parms);
+			     struct ehca_alloc_qp_parms *parms, int is_user);
 u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
 		      const u8 port_id,
...
@@ -54,12 +54,15 @@ int hcall_unmap_page(u64 mapaddr)
 	return 0;
 }
-int hcp_galpas_ctor(struct h_galpas *galpas,
+int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
 		    u64 paddr_kernel, u64 paddr_user)
 {
+	if (!is_user) {
 		int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
 		if (ret)
 			return ret;
+	} else
+		galpas->kernel.fw_handle = 0;
 	galpas->user.fw_handle = paddr_user;
...
@@ -78,7 +78,7 @@ static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
 	*(volatile u64 __force *)addr = value;
 }
-int hcp_galpas_ctor(struct h_galpas *galpas,
+int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
 		    u64 paddr_kernel, u64 paddr_user);
 int hcp_galpas_dtor(struct h_galpas *galpas);
...
@@ -220,11 +220,14 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
 	queue->small_page = NULL;
 	/* allocate queue page pointers */
+	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+	if (!queue->queue_pages) {
 		queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
 		if (!queue->queue_pages) {
 			ehca_gen_err("Couldn't allocate queue page list");
 			return 0;
 		}
+	}
 	memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
 	/* allocate actual queue pages */
@@ -240,7 +243,10 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
 ipz_queue_ctor_exit0:
 	ehca_gen_err("Couldn't alloc pages queue=%p "
 		     "nr_of_pages=%x", queue, nr_of_pages);
+	if (is_vmalloc_addr(queue->queue_pages))
 		vfree(queue->queue_pages);
+	else
+		kfree(queue->queue_pages);
 	return 0;
 }
@@ -262,7 +268,10 @@ int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
 			free_page((unsigned long)queue->queue_pages[i]);
 	}
+	if (is_vmalloc_addr(queue->queue_pages))
 		vfree(queue->queue_pages);
+	else
+		kfree(queue->queue_pages);
 	return 1;
 }
...
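The allocation change above is the usual kmalloc-first, vmalloc-fallback idiom: try the cheap physically contiguous allocation, fall back to vmalloc() only when it fails (e.g. for very large queues), and let is_vmalloc_addr() pick the matching free routine on every teardown path. A minimal sketch using only stock kernel APIs (alloc_page_list/free_page_list are illustrative names, not functions from this driver):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>	/* is_vmalloc_addr() */

/* Prefer kmalloc(); fall back to vmalloc() for big allocations. */
static void **alloc_page_list(unsigned long nr_of_pages)
{
	void **pages;

	pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
	if (!pages)
		pages = vmalloc(nr_of_pages * sizeof(void *));
	return pages;
}

/* Free with the routine matching however the memory was obtained. */
static void free_page_list(void **pages)
{
	if (is_vmalloc_addr(pages))
		vfree(pages);
	else
		kfree(pages);
}

The point of the two paired commits in the shortlog ("Replace vmalloc() with kmalloc()..." then "Fall back to vmalloc()...") is exactly this: kmalloc is preferable when it succeeds, but must not become a hard failure for large queue page lists.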
@@ -1585,12 +1585,16 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 		case IB_WR_LOCAL_INV:
+			ctrl->srcrb_flags |=
+				cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
 			set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
 			wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
 			size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
 			break;
 		case IB_WR_FAST_REG_MR:
+			ctrl->srcrb_flags |=
+				cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
 			set_fmr_seg(wqe, wr);
 			wqe  += sizeof (struct mlx4_wqe_fmr_seg);
 			size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
...
@@ -1059,7 +1059,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET);
 	if (mthca_is_memfree(dev))
 		dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64),
-					       MTHCA_MTT_SEG_SIZE) / MTHCA_MTT_SEG_SIZE;
+					       dev->limits.mtt_seg_size) / dev->limits.mtt_seg_size;
 	else
 		dev_lim->reserved_mtts = 1 << (field >> 4);
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET);
...
@@ -159,6 +159,7 @@ struct mthca_limits {
 	int reserved_eqs;
 	int num_mpts;
 	int num_mtt_segs;
+	int mtt_seg_size;
 	int fmr_reserved_mtts;
 	int reserved_mtts;
 	int reserved_mrws;
...
@@ -641,9 +641,11 @@ static void mthca_free_irqs(struct mthca_dev *dev)
 	if (dev->eq_table.have_irq)
 		free_irq(dev->pdev->irq, dev);
 	for (i = 0; i < MTHCA_NUM_EQ; ++i)
-		if (dev->eq_table.eq[i].have_irq)
+		if (dev->eq_table.eq[i].have_irq) {
 			free_irq(dev->eq_table.eq[i].msi_x_vector,
 				 dev->eq_table.eq + i);
+			dev->eq_table.eq[i].have_irq = 0;
+		}
 }
 static int mthca_map_reg(struct mthca_dev *dev,
...
@@ -125,6 +125,10 @@ module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
 MODULE_PARM_DESC(fmr_reserved_mtts,
 		 "number of memory translation table segments reserved for FMR");
+static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
 static char mthca_version[] __devinitdata =
 	DRV_NAME ": Mellanox InfiniBand HCA driver v"
 	DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -162,6 +166,7 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
 	int err;
 	u8 status;
+	mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
 	err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
 	if (err) {
 		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
@@ -460,11 +465,11 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 	}
 	/* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
-	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * MTHCA_MTT_SEG_SIZE,
-					   dma_get_cache_alignment()) / MTHCA_MTT_SEG_SIZE;
+	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
+					   dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;
 	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
-							 MTHCA_MTT_SEG_SIZE,
+							 mdev->limits.mtt_seg_size,
 							 mdev->limits.num_mtt_segs,
 							 mdev->limits.reserved_mtts,
 							 1, 0);
@@ -1315,6 +1320,12 @@ static void __init mthca_validate_profile(void)
 		printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
 		       hca_profile.fmr_reserved_mtts);
 	}
+	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+		printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n",
+		       log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8));
+		log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
+	}
 }
 static int __init mthca_init(void)
...
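The parameter's arithmetic: an MTT entry is an 8-byte (64-bit) address, which is what the `* 8` in mthca_dev_lim() encodes, so a segment of 2^log_mtts_per_seg entries occupies (1 << log_mtts_per_seg) * 8 bytes. The permitted range 1..5 therefore corresponds to 2..32 entries (16..256 bytes) per segment, and the default ilog2(MTHCA_MTT_SEG_SIZE / 8) preserves the driver's previous fixed segment size. A quick userspace illustration of the table (assuming that 8-byte entry size):

#include <stdio.h>

#define MTT_ENTRY_SIZE 8	/* bytes per MTT entry (64-bit address) */

int main(void)
{
	/* mtt_seg_size as computed in mthca_dev_lim() above */
	for (int log_mtts_per_seg = 1; log_mtts_per_seg <= 5; ++log_mtts_per_seg) {
		int entries  = 1 << log_mtts_per_seg;
		int seg_size = entries * MTT_ENTRY_SIZE;
		printf("log=%d -> %2d entries/segment, %3d bytes/segment\n",
		       log_mtts_per_seg, entries, seg_size);
	}
	return 0;
}

Smaller segments waste fewer MTT entries on small registrations; larger segments let one segment describe more memory, which is the trade-off the module parameter exposes.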
@@ -220,7 +220,7 @@ static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
 	mtt->buddy = buddy;
 	mtt->order = 0;
-	for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1)
+	for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
 		++mtt->order;
 	mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
@@ -267,7 +267,7 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
 	while (list_len > 0) {
 		mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
-					   mtt->first_seg * MTHCA_MTT_SEG_SIZE +
+					   mtt->first_seg * dev->limits.mtt_seg_size +
 					   start_index * 8);
 		mtt_entry[1] = 0;
 		for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
@@ -326,7 +326,7 @@ static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
 	u64 __iomem *mtts;
 	int i;
-	mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * MTHCA_MTT_SEG_SIZE +
+	mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
 		start_index * sizeof (u64);
 	for (i = 0; i < list_len; ++i)
 		mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
@@ -345,10 +345,10 @@ static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
 	/* For Arbel, all MTTs must fit in the same page. */
 	BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
 	/* Require full segments */
-	BUG_ON(s % MTHCA_MTT_SEG_SIZE);
+	BUG_ON(s % dev->limits.mtt_seg_size);
 	mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
-				s / MTHCA_MTT_SEG_SIZE, &dma_handle);
+				s / dev->limits.mtt_seg_size, &dma_handle);
 	BUG_ON(!mtts);
@@ -479,7 +479,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
 	if (mr->mtt)
 		mpt_entry->mtt_seg =
 			cpu_to_be64(dev->mr_table.mtt_base +
-				    mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE);
+				    mr->mtt->first_seg * dev->limits.mtt_seg_size);
 	if (0) {
 		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
@@ -626,7 +626,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
 		goto err_out_table;
 	}
-	mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
+	mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;
 	if (mthca_is_memfree(dev)) {
 		mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
@@ -908,7 +908,7 @@ int mthca_init_mr_table(struct mthca_dev *dev)
 			dev->mr_table.mtt_base);
 	dev->mr_table.tavor_fmr.mtt_base =
-		ioremap(addr, mtts * MTHCA_MTT_SEG_SIZE);
+		ioremap(addr, mtts * dev->limits.mtt_seg_size);
 	if (!dev->mr_table.tavor_fmr.mtt_base) {
 		mthca_warn(dev, "MTT ioremap for FMR failed.\n");
 		err = -ENOMEM;
...
@@ -94,7 +94,7 @@ s64 mthca_make_profile(struct mthca_dev *dev,
 	profile[MTHCA_RES_RDB].size = MTHCA_RDB_ENTRY_SIZE;
 	profile[MTHCA_RES_MCG].size = MTHCA_MGM_ENTRY_SIZE;
 	profile[MTHCA_RES_MPT].size = dev_lim->mpt_entry_sz;
-	profile[MTHCA_RES_MTT].size = MTHCA_MTT_SEG_SIZE;
+	profile[MTHCA_RES_MTT].size = dev->limits.mtt_seg_size;
 	profile[MTHCA_RES_UAR].size = dev_lim->uar_scratch_entry_sz;
 	profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE;
 	profile[MTHCA_RES_UARC].size = request->uarc_size;
@@ -232,7 +232,7 @@ s64 mthca_make_profile(struct mthca_dev *dev,
 			dev->limits.num_mtt_segs = profile[i].num;
 			dev->mr_table.mtt_base = profile[i].start;
 			init_hca->mtt_base = profile[i].start;
-			init_hca->mtt_seg_sz = ffs(MTHCA_MTT_SEG_SIZE) - 7;
+			init_hca->mtt_seg_sz = ffs(dev->limits.mtt_seg_size) - 7;
 			break;
 		case MTHCA_RES_UAR:
 			dev->limits.num_uars = profile[i].num;
...
@@ -667,7 +667,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
 	i = 0;
 	while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
 		mdelay(1);
-	if (i >= 10000) {
+	if (i > 10000) {
 		nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
 		return 0;
 	}
@@ -675,7 +675,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
 	i = 0;
 	while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
 		mdelay(1);
-	if (i >= 10000) {
+	if (i > 10000) {
 		printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
 		       nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
 		return 0;
@@ -701,7 +701,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
 	i = 0;
 	while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
 		mdelay(1);
-	if (i >= 10000) {
+	if (i > 10000) {
 		nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n");
 		return 0;
 	}
@@ -711,7 +711,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
 	while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
 			& 0x0000000f)) != 0x0000000f) && i++ < 5000)
 		mdelay(1);
-	if (i >= 5000) {
+	if (i > 5000) {
 		nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp);
 		return 0;
 	}
@@ -722,7 +722,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
 	while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
 			& 0x0000000f)) != 0x0000000f) && i++ < 5000)
 		mdelay(1);
-	if (i >= 5000) {
+	if (i > 5000) {
 		nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp);
 		return 0;
 	}
@@ -792,7 +792,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
 		while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
 				& 0x0000000f)) != 0x0000000f) && i++ < 5000)
 			mdelay(1);
-		if (i >= 5000) {
+		if (i > 5000) {
 			nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp);
 			return 1;
 		}
@@ -815,7 +815,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
 		while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
 				& 0x0000000f)) != 0x0000000f) && (i++ < 5000))
 			mdelay(1);
-		if (i >= 5000) {
+		if (i > 5000) {
 			printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp);
 			/* return 1; */
 		}
...
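The off-by-one being fixed: in while (cond && i++ < LIMIT), i can only exceed LIMIT when the counter check itself terminates the loop. If the device becomes ready exactly on the last permitted poll, i ends at LIMIT, so the old "i >= LIMIT" test misreported that success as a timeout. A small standalone sketch of the idiom (poll_until_ready and checks_until_ready are illustrative names):

#include <stdio.h>

/*
 * checks_until_ready models how many "not ready" reads the hardware
 * returns before becoming ready (a stand-in for the register reads).
 */
static int poll_until_ready(int checks_until_ready, int limit)
{
	int i = 0;

	while ((checks_until_ready-- > 0) && i++ < limit)
		;	/* mdelay(1) in the driver */

	/* Timeout iff the counter check ended the loop (i == limit + 1). */
	return i > limit ? -1 : 0;
}

int main(void)
{
	/* Ready on the last permitted poll: i ends at limit, so the old
	 * "i >= limit" test would have falsely reported a timeout. */
	printf("%d\n", poll_until_ready(10000, 10000));	/*  0: success */
	printf("%d\n", poll_until_ready(20000, 10000));	/* -1: timeout */
	return 0;
}
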
@@ -497,8 +497,10 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
 	if (eq_table->have_irq)
 		free_irq(dev->pdev->irq, dev);
 	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
-		if (eq_table->eq[i].have_irq)
+		if (eq_table->eq[i].have_irq) {
 			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+			eq_table->eq[i].have_irq = 0;
+		}
 	kfree(eq_table->irq_names);
 }
...
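This is the same fix as the mthca hunk above: on the MSI-X-to-INTx fallback path the IRQ teardown runs twice, and a stale have_irq flag made the second pass call free_irq() again on an already-freed vector. Clearing the flag immediately after free_irq() makes teardown idempotent. A userspace sketch of the pattern (all names here are illustrative stand-ins for the driver's EQ table and free_irq()):

#include <stdbool.h>
#include <stdio.h>

struct eq {
	int  irq;
	bool have_irq;
};

static void free_irq_stub(int irq)
{
	printf("free_irq(%d)\n", irq);	/* freeing twice would be the bug */
}

/* Idempotent teardown: clear the flag as soon as the IRQ is freed. */
static void free_eq_irqs(struct eq *eqs, int n)
{
	for (int i = 0; i < n; ++i)
		if (eqs[i].have_irq) {
			free_irq_stub(eqs[i].irq);
			eqs[i].have_irq = false;
		}
}

int main(void)
{
	struct eq eqs[2] = { { 32, true }, { 33, true } };

	/* Called twice, as happens on the MSI-X -> INTx fallback path;
	 * each IRQ is still freed exactly once. */
	free_eq_irqs(eqs, 2);
	free_eq_irqs(eqs, 2);
	return 0;
}
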
@@ -100,6 +100,10 @@ module_param_named(use_prio, use_prio, bool, 0444);
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
 		 "(0/1, default 0)");
+static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
 int mlx4_check_port_params(struct mlx4_dev *dev,
 			   enum mlx4_port_type *port_type)
 {
@@ -203,12 +207,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
 	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
 	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
+	dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
 	dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
-					       MLX4_MTT_ENTRY_PER_SEG);
+					       dev->caps.mtts_per_seg);
 	dev->caps.reserved_mrws = dev_cap->reserved_mrws;
 	dev->caps.reserved_uars = dev_cap->reserved_uars;
 	dev->caps.reserved_pds = dev_cap->reserved_pds;
-	dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+	dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
 	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
 	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
 	dev->caps.flags = dev_cap->flags;
@@ -1304,6 +1309,11 @@ static int __init mlx4_verify_params(void)
 		return -1;
 	}
+	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+		printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+		return -1;
+	}
 	return 0;
 }
...
@@ -209,7 +209,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
 	} else
 		mtt->page_shift = page_shift;
-	for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
+	for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
 		++mtt->order;
 	mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
@@ -350,7 +350,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
 						   MLX4_MPT_PD_FLAG_RAE);
 		mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
-						MLX4_MTT_ENTRY_PER_SEG);
+						dev->caps.mtts_per_seg);
 	} else {
 		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
 	}
@@ -391,7 +391,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	    (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
 		return -EINVAL;
-	if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1))
+	if (start_index & (dev->caps.mtts_per_seg - 1))
 		return -EINVAL;
 	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
...
@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
 	profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
 	profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
-	profile[MLX4_RES_MTT].size = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+	profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
 	profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
 	profile[MLX4_RES_QP].num = request->num_qp;
...
@@ -210,6 +210,7 @@ struct mlx4_caps {
 	int num_comp_vectors;
 	int num_mpts;
 	int num_mtt_segs;
+	int mtts_per_seg;
 	int fmr_reserved_mtts;
 	int reserved_mtts;
 	int reserved_mrws;
...
@@ -165,6 +165,7 @@ enum {
 	MLX4_WQE_CTRL_IP_CSUM		= 1 << 4,
 	MLX4_WQE_CTRL_TCP_UDP_CSUM	= 1 << 5,
 	MLX4_WQE_CTRL_INS_VLAN		= 1 << 6,
+	MLX4_WQE_CTRL_STRONG_ORDER	= 1 << 7,
 };
 struct mlx4_wqe_ctrl_seg {
...