Commit 9c96ecfb authored by Stephen Rothwell

Merge commit 'scsi-post-merge/master'

Conflicts:
	drivers/scsi/Makefile
parents 29ad1035 15addcb9
......@@ -4612,6 +4612,14 @@ F: drivers/ata/
F: include/linux/ata.h
F: include/linux/libata.h
SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
P: Jayamohan Kallickal
M: jayamohank@serverengines.com
L: linux-scsi@vger.kernel.org
W: http://www.serverengines.com
S: Supported
F: drivers/scsi/be2iscsi/
SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER
M: Sathya Perla <sathyap@serverengines.com>
M: Subbu Seetharaman <subbus@serverengines.com>
......
......@@ -45,6 +45,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <linux/blk-iopoll.h>
#define DRV_NAME "ahci"
#define DRV_VERSION "3.0"
......@@ -2114,7 +2115,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
ata_port_abort(ap);
}
static void ahci_port_intr(struct ata_port *ap)
static int ahci_port_intr(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_eh_info *ehi = &ap->link.eh_info;
......@@ -2144,7 +2145,7 @@ static void ahci_port_intr(struct ata_port *ap)
if (unlikely(status & PORT_IRQ_ERROR)) {
ahci_error_intr(ap, status);
return;
return 0;
}
if (status & PORT_IRQ_SDB_FIS) {
......@@ -2185,7 +2186,43 @@ static void ahci_port_intr(struct ata_port *ap)
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_RESET;
ata_port_freeze(ap);
rc = 0;
}
return rc;
}
static void ap_irq_disable(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
writel(0, port_mmio + PORT_IRQ_MASK);
}
static void ap_irq_enable(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_port_priv *pp = ap->private_data;
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
static int ahci_iopoll(struct blk_iopoll *iop, int budget)
{
struct ata_port *ap = container_of(iop, struct ata_port, iopoll);
unsigned long flags;
int ret;
spin_lock_irqsave(&ap->host->lock, flags);
ret = ahci_port_intr(ap);
spin_unlock_irqrestore(&ap->host->lock, flags);
if (ret < budget) {
blk_iopoll_complete(iop);
ap_irq_enable(ap);
}
return ret;
}
static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
......@@ -2218,7 +2255,12 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
ap = host->ports[i];
if (ap) {
ahci_port_intr(ap);
if (!blk_iopoll_enabled)
ahci_port_intr(ap);
else if (!blk_iopoll_sched_prep(&ap->iopoll)) {
ap_irq_disable(ap);
blk_iopoll_sched(&ap->iopoll);
}
VPRINTK("port %u\n", i);
} else {
VPRINTK("port %u (no irq)\n", i);
......@@ -2352,6 +2394,8 @@ static void ahci_pmp_detach(struct ata_port *ap)
static int ahci_port_resume(struct ata_port *ap)
{
blk_iopoll_enable(&ap->iopoll);
ahci_power_up(ap);
ahci_start_port(ap);
......@@ -2482,6 +2526,8 @@ static int ahci_port_start(struct ata_port *ap)
ap->private_data = pp;
blk_iopoll_init(&ap->iopoll, 32, ahci_iopoll);
/* engage engines, captain */
return ahci_port_resume(ap);
}
......@@ -2495,6 +2541,8 @@ static void ahci_port_stop(struct ata_port *ap)
rc = ahci_deinit_port(ap, &emsg);
if (rc)
ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
blk_iopoll_disable(&ap->iopoll);
}
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
......
......@@ -114,6 +114,9 @@ module_param_call(mpt_fwfault_debug, param_set_int, param_get_int,
MODULE_PARM_DESC(mpt_fwfault_debug, "Enable detection of Firmware fault"
" and halt Firmware on fault - (default=0)");
static int mpt_iopoll_w = 32;
module_param(mpt_iopoll_w, int, 0);
MODULE_PARM_DESC(mpt_iopoll_w, "blk iopoll budget (default=32)");
#ifdef MFCNT
......@@ -515,6 +518,44 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa)
mb();
}
static void mpt_irq_disable(MPT_ADAPTER *ioc)
{
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
CHIPREG_READ32(&ioc->chip->IntStatus);
}
static void mpt_irq_enable(MPT_ADAPTER *ioc)
{
CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
}
static inline void __mpt_handle_irq(MPT_ADAPTER *ioc, u32 pa)
{
if (pa & MPI_ADDRESS_REPLY_A_BIT)
mpt_reply(ioc, pa);
else
mpt_turbo_reply(ioc, pa);
}
static int mpt_handle_irq(MPT_ADAPTER *ioc, unsigned int budget)
{
int nr = 0;
u32 pa;
/*
* Drain the reply FIFO!
*/
while ((pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo)) != 0xffffffff) {
nr++;
__mpt_handle_irq(ioc, pa);
if (nr == budget)
break;
}
return nr;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
......@@ -536,23 +577,48 @@ static irqreturn_t
mpt_interrupt(int irq, void *bus_id)
{
MPT_ADAPTER *ioc = bus_id;
u32 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
int nr = 0;
if (!blk_iopoll_enabled)
nr = mpt_handle_irq(ioc, -1U);
else if (!blk_iopoll_sched_prep(&ioc->iopoll)) {
mpt_irq_disable(ioc);
ioc->iopoll.data = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
blk_iopoll_sched(&ioc->iopoll);
nr = 1;
} else {
/*
* Not really handled, but it will be by iopoll.
*/
nr = 1;
}
if (pa == 0xFFFFFFFF)
return IRQ_NONE;
if (nr)
return IRQ_HANDLED;
/*
* Drain the reply FIFO!
*/
do {
if (pa & MPI_ADDRESS_REPLY_A_BIT)
mpt_reply(ioc, pa);
else
mpt_turbo_reply(ioc, pa);
pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
} while (pa != 0xFFFFFFFF);
return IRQ_NONE;
}
return IRQ_HANDLED;
static int mpt_iopoll(struct blk_iopoll *iop, int budget)
{
MPT_ADAPTER *ioc = container_of(iop, MPT_ADAPTER, iopoll);
int ret = 0;
u32 pa;
pa = iop->data;
iop->data = 0xffffffff;
if (pa != 0xffffffff) {
__mpt_handle_irq(ioc, pa);
ret = 1;
}
ret += mpt_handle_irq(ioc, budget - ret);
if (ret < budget) {
blk_iopoll_complete(iop);
mpt_irq_enable(ioc);
}
return ret;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
......@@ -2087,6 +2153,7 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state)
/* Clear any lingering interrupt */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
blk_iopoll_disable(&ioc->iopoll);
free_irq(ioc->pci_irq, ioc);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
......@@ -2352,6 +2419,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
ret = -EBUSY;
goto out;
}
blk_iopoll_init(&ioc->iopoll, mpt_iopoll_w, mpt_iopoll);
blk_iopoll_enable(&ioc->iopoll);
irq_allocated = 1;
ioc->pci_irq = ioc->pcidev->irq;
pci_set_master(ioc->pcidev); /* ?? */
......@@ -2544,6 +2613,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
out:
if ((ret != 0) && irq_allocated) {
blk_iopoll_disable(&ioc->iopoll);
free_irq(ioc->pci_irq, ioc);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
......@@ -2752,6 +2822,7 @@ mpt_adapter_dispose(MPT_ADAPTER *ioc)
mpt_adapter_disable(ioc);
if (ioc->pci_irq != -1) {
blk_iopoll_disable(&ioc->iopoll);
free_irq(ioc->pci_irq, ioc);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
......
......@@ -52,6 +52,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/blk-iopoll.h>
#include "lsi/mpi_type.h"
#include "lsi/mpi.h" /* Fusion MPI(nterface) basic defs */
......@@ -764,6 +765,8 @@ typedef struct _MPT_ADAPTER
struct workqueue_struct *reset_work_q;
struct delayed_work fault_reset_work;
struct blk_iopoll iopoll;
u8 sg_addr_size;
u8 in_rescan;
u8 SGE_size;
......
......@@ -366,6 +366,7 @@ config ISCSI_TCP
source "drivers/scsi/cxgb3i/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
source "drivers/scsi/be2iscsi/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
......
......@@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_MVSAS) += mvsas/
obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
obj-$(CONFIG_ARM) += arm/
......
config BE2ISCSI
tristate "ServerEngines' 10Gbps iSCSI - BladeEngine 2"
depends on PCI && SCSI
select SCSI_ISCSI_ATTRS
help
This driver implements the iSCSI functionality for ServerEngines'
10Gbps Storage adapter - BladeEngine 2.
#
# Makefile to build the iSCSI driver for ServerEngines' BladeEngine.
#
#
obj-$(CONFIG_BE2ISCSI) += be2iscsi.o
be2iscsi-y := be_iscsi.o be_main.o be_mgmt.o be_cmds.o
/**
* Copyright (C) 2005 - 2009 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#ifndef BEISCSI_H
#define BEISCSI_H
#include <linux/pci.h>
#include <linux/if_vlan.h>
#define FW_VER_LEN 32
struct be_dma_mem {
void *va;
dma_addr_t dma;
u32 size;
};
struct be_queue_info {
struct be_dma_mem dma_mem;
u16 len;
u16 entry_size; /* Size of an element in the queue */
u16 id;
u16 tail, head;
bool created;
atomic_t used; /* Number of valid elements in the queue */
};
static inline u32 MODULO(u16 val, u16 limit)
{
WARN_ON(limit & (limit - 1));
return val & (limit - 1);
}
static inline void index_inc(u16 *index, u16 limit)
{
*index = MODULO((*index + 1), limit);
}
static inline void *queue_head_node(struct be_queue_info *q)
{
return q->dma_mem.va + q->head * q->entry_size;
}
static inline void *queue_tail_node(struct be_queue_info *q)
{
return q->dma_mem.va + q->tail * q->entry_size;
}
static inline void queue_head_inc(struct be_queue_info *q)
{
index_inc(&q->head, q->len);
}
static inline void queue_tail_inc(struct be_queue_info *q)
{
index_inc(&q->tail, q->len);
}
/*ISCSI */
struct be_eq_obj {
struct be_queue_info q;
char desc[32];
/* Adaptive interrupt coalescing (AIC) info */
bool enable_aic;
u16 min_eqd; /* in usecs */
u16 max_eqd; /* in usecs */
u16 cur_eqd; /* in usecs */
};
struct be_mcc_obj {
struct be_queue_info *q;
struct be_queue_info *cq;
};
struct be_ctrl_info {
u8 __iomem *csr;
u8 __iomem *db; /* Door Bell */
u8 __iomem *pcicfg; /* PCI config space */
struct pci_dev *pdev;
/* Mbox used for cmd request/response */
spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
* is stored for freeing purpose */
struct be_dma_mem mbox_mem_alloced;
/* MCC Rings */
struct be_mcc_obj mcc_obj;
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
/* MCC Async callback */
void (*async_cb) (void *adapter, bool link_up);
void *adapter_ctxt;
};
#include "be_cmds.h"
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \
((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
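/*
 * Worked example (illustrative values only): for a 0x2000-byte buffer that
 * starts 0x100 bytes into a 4K page, PAGES_4K_SPANNED evaluates to
 * ((0x100 + 0x2000 + 0xFFF) >> 12) = 3, since the data touches three pages.
 */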
/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(addr) \
((size_t)(addr) & (PAGE_SIZE_4K-1))
/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field) \
(((size_t)&(((_struct *)0)->field))%32)
/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}
static inline void amap_set(void *ptr, u32 dw_offset, u32 mask,
u32 offset, u32 value)
{
u32 *dw = (u32 *) ptr + dw_offset;
*dw &= ~(mask << offset);
*dw |= (mask & value) << offset;
}
#define AMAP_SET_BITS(_struct, field, ptr, val) \
amap_set(ptr, \
offsetof(_struct, field)/32, \
amap_mask(sizeof(((_struct *)0)->field)), \
AMAP_BIT_OFFSET(_struct, field), \
val)
static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
u32 *dw = ptr;
return mask & (*(dw + dw_offset) >> offset);
}
#define AMAP_GET_BITS(_struct, field, ptr) \
amap_get(ptr, \
offsetof(_struct, field)/32, \
amap_mask(sizeof(((_struct *)0)->field)), \
AMAP_BIT_OFFSET(_struct, field))
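/*
 * Example of the pseudo-struct scheme: in amap_eq_context (be_cmds.h, later
 * in this patch) the "valid" member follows cidx[13], rsvd0[3] and epidx[13],
 * so its byte offset in the pseudo-struct is 29.  Hence
 * AMAP_SET_BITS(struct amap_eq_context, valid, ctxt, 1) becomes
 * amap_set(ctxt, 29 / 32 = dword 0, (1 << 1) - 1 = 0x1, 29 % 32 = 29, 1),
 * i.e. it sets bit 29 of the first dword of the hardware EQ context.
 */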
#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
u32 *dw = wrb;
WARN_ON(len % 4);
do {
*dw = cpu_to_le32(*dw);
dw++;
len -= 4;
} while (len);
#endif /* __BIG_ENDIAN */
}
extern void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
u16 num_popped);
#endif /* BEISCSI_H */
/**
* Copyright (C) 2005 - 2009 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#include "be.h"
#include "be_mgmt.h"
#include "be_main.h"
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
if (compl->flags != 0) {
compl->flags = le32_to_cpu(compl->flags);
WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
return true;
} else
return false;
}
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
compl->flags = 0;
}
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
struct be_mcc_compl *compl)
{
u16 compl_status, extd_status;
be_dws_le_to_cpu(compl, 4);
compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
CQE_STATUS_COMPL_MASK;
if (compl_status != MCC_STATUS_SUCCESS) {
extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
CQE_STATUS_EXTD_MASK;
dev_err(&ctrl->pdev->dev,
"error in cmd completion: status(compl/extd)=%d/%d\n",
compl_status, extd_status);
return -1;
}
return 0;
}
static inline bool is_link_state_evt(u32 trailer)
{
return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
ASYNC_TRAILER_EVENT_CODE_MASK) == ASYNC_EVENT_CODE_LINK_STATE);
}
void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
u16 num_popped)
{
u32 val = 0;
val |= qid & DB_CQ_RING_ID_MASK;
if (arm)
val |= 1 << DB_CQ_REARM_SHIFT;
val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
iowrite32(val, ctrl->db + DB_CQ_OFFSET);
}
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
#define long_delay 2000
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
int cnt = 0, wait = 5; /* in usecs */
u32 ready;
do {
ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
if (ready)
break;
if (cnt > 6000000) {
dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
return -1;
}
if (cnt > 50) {
wait = long_delay;
mdelay(long_delay / 1000);
} else
udelay(wait);
cnt += wait;
} while (true);
return 0;
}
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
int status;
u32 val = 0;
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
struct be_mcc_mailbox *mbox = mbox_mem->va;
struct be_mcc_compl *compl = &mbox->compl;
val &= ~MPU_MAILBOX_DB_RDY_MASK;
val |= MPU_MAILBOX_DB_HI_MASK;
val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
iowrite32(val, db);
status = be_mbox_db_ready_wait(ctrl);
if (status != 0) {
SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 1\n");
return status;
}
val = 0;
val &= ~MPU_MAILBOX_DB_RDY_MASK;
val &= ~MPU_MAILBOX_DB_HI_MASK;
val |= (u32) (mbox_mem->dma >> 4) << 2;
iowrite32(val, db);
status = be_mbox_db_ready_wait(ctrl);
if (status != 0) {
SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 2\n");
return status;
}
if (be_mcc_compl_is_new(compl)) {
status = be_mcc_compl_process(ctrl, &mbox->compl);
be_mcc_compl_use(compl);
if (status) {
SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process \n");
return status;
}
} else {
dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
return -1;
}
return 0;
}
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
bool embedded, u8 sge_cnt)
{
if (embedded)
wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
else
wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
MCC_WRB_SGE_CNT_SHIFT;
wrb->payload_length = payload_len;
be_dws_cpu_to_le(wrb, 8);
}
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
u8 subsystem, u8 opcode, int cmd_len)
{
req_hdr->opcode = opcode;
req_hdr->subsystem = subsystem;
req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
struct be_dma_mem *mem)
{
int i, buf_pages;
u64 dma = (u64) mem->dma;
buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
for (i = 0; i < buf_pages; i++) {
pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
pages[i].hi = cpu_to_le32(upper_32_bits(dma));
dma += PAGE_SIZE_4K;
}
}
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
const u32 round = 10;
u32 multiplier;
if (usec_delay == 0)
multiplier = 0;
else {
u32 interrupt_rate = 1000000 / usec_delay;
if (interrupt_rate == 0)
multiplier = 1023;
else {
multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
multiplier /= interrupt_rate;
multiplier = (multiplier + round / 2) / round;
multiplier = min(multiplier, (u32) 1023);
}
}
return multiplier;
}
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *eq, int eq_delay)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_cmd_req_eq_create *req = embedded_payload(wrb);
struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
struct be_dma_mem *q_mem = &eq->dma_mem;
int status;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_EQ_CREATE, sizeof(*req));
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
AMAP_SET_BITS(struct amap_eq_context, func, req->context,
PCI_FUNC(ctrl->pdev->devfn));
AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
AMAP_SET_BITS(struct amap_eq_context, count, req->context,
__ilog2_u32(eq->len / 256));
AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
eq_delay_to_mult(eq_delay));
be_dws_cpu_to_le(req->context, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
status = be_mbox_notify(ctrl);
if (!status) {
eq->id = le16_to_cpu(resp->eq_id);
eq->created = true;
}
spin_unlock(&ctrl->mbox_lock);
return status;
}
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
int status;
u8 *endian_check;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
endian_check = (u8 *) wrb;
*endian_check++ = 0xFF;
*endian_check++ = 0x12;
*endian_check++ = 0x34;
*endian_check++ = 0xFF;
*endian_check++ = 0xFF;
*endian_check++ = 0x56;
*endian_check++ = 0x78;
*endian_check++ = 0xFF;
be_dws_cpu_to_le(wrb, sizeof(*wrb));
status = be_mbox_notify(ctrl);
if (status)
SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed \n");
spin_unlock(&ctrl->mbox_lock);
return status;
}
int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *cq, struct be_queue_info *eq,
bool sol_evts, bool no_delay, int coalesce_wm)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_cmd_req_cq_create *req = embedded_payload(wrb);
struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
struct be_dma_mem *q_mem = &cq->dma_mem;
void *ctxt = &req->context;
int status;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_CQ_CREATE, sizeof(*req));
if (!q_mem->va)
SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
__ilog2_u32(cq->len / 256));
AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
PCI_FUNC(ctrl->pdev->devfn));
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
status = be_mbox_notify(ctrl);
if (!status) {
cq->id = le16_to_cpu(resp->cq_id);
cq->created = true;
} else
SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x \n",
status);
spin_unlock(&ctrl->mbox_lock);
return status;
}
static u32 be_encoded_q_len(int q_len)
{
u32 len_encoded = fls(q_len); /* log2(len) + 1 */
if (len_encoded == 16)
len_encoded = 0;
return len_encoded;
}
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
int queue_type)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
u8 subsys = 0, opcode = 0;
int status;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
switch (queue_type) {
case QTYPE_EQ:
subsys = CMD_SUBSYSTEM_COMMON;
opcode = OPCODE_COMMON_EQ_DESTROY;
break;
case QTYPE_CQ:
subsys = CMD_SUBSYSTEM_COMMON;
opcode = OPCODE_COMMON_CQ_DESTROY;
break;
case QTYPE_WRBQ:
subsys = CMD_SUBSYSTEM_ISCSI;
opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
break;
case QTYPE_DPDUQ:
subsys = CMD_SUBSYSTEM_ISCSI;
opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
break;
case QTYPE_SGL:
subsys = CMD_SUBSYSTEM_ISCSI;
opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
break;
default:
spin_unlock(&ctrl->mbox_lock);
BUG();
return -1;
}
be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
if (queue_type != QTYPE_SGL)
req->id = cpu_to_le16(q->id);
status = be_mbox_notify(ctrl);
spin_unlock(&ctrl->mbox_lock);
return status;
}
int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb);
int status;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
sizeof(*req));
status = be_mbox_notify(ctrl);
if (!status) {
struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb);
memcpy(mac_addr, resp->mac_address, ETH_ALEN);
}
spin_unlock(&ctrl->mbox_lock);
return status;
}
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
struct be_queue_info *cq,
struct be_queue_info *dq, int length,
int entry_size)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_defq_create_req *req = embedded_payload(wrb);
struct be_dma_mem *q_mem = &dq->dma_mem;
void *ctxt = &req->context;
int status;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
1);
AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
PCI_FUNC(ctrl->pdev->devfn));
AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
be_encoded_q_len(length / sizeof(struct phys_addr)));
AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
ctxt, entry_size);
AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
cq->id);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
status = be_mbox_notify(ctrl);
if (!status) {
struct be_defq_create_resp *resp = embedded_payload(wrb);
dq->id = le16_to_cpu(resp->id);
dq->created = true;
}
spin_unlock(&ctrl->mbox_lock);
return status;
}
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
struct be_queue_info *wrbq)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_wrbq_create_req *req = embedded_payload(wrb);
struct be_wrbq_create_resp *resp = embedded_payload(wrb);
int status;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
status = be_mbox_notify(ctrl);
if (!status)
wrbq->id = le16_to_cpu(resp->cid);
spin_unlock(&ctrl->mbox_lock);
return status;
}
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem,
u32 page_offset, u32 num_pages)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_post_sgl_pages_req *req = embedded_payload(wrb);
int status;
unsigned int curr_pages;
u32 internal_page_offset = 0;
u32 temp_num_pages = num_pages;
if (num_pages == 0xff)
num_pages = 1;
spin_lock(&ctrl->mbox_lock);
do {
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
sizeof(*req));
curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
pages);
req->num_pages = min(num_pages, curr_pages);
req->page_offset = page_offset;
be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
internal_page_offset += req->num_pages;
page_offset += req->num_pages;
num_pages -= req->num_pages;
if (temp_num_pages == 0xff)
req->num_pages = temp_num_pages;
status = be_mbox_notify(ctrl);
if (status) {
SE_DEBUG(DBG_LVL_1,
"FW CMD to map iscsi frags failed.\n");
goto error;
}
} while (num_pages > 0);
error:
spin_unlock(&ctrl->mbox_lock);
if (status != 0)
beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
return status;
}
/**
* Copyright (C) 2005 - 2009 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#ifndef BEISCSI_CMDS_H
#define BEISCSI_CMDS_H
/**
* The driver sends configuration and management command requests to the
* firmware in the BE. These requests are communicated to the processor
* using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
* WRB inside a MAILBOX.
* The commands are serviced by the ARM processor in the BladeEngine's MPU.
*/
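/*
 * A typical embedded command, sketched from the helpers in be_cmds.c
 * (beiscsi_cmd_eq_create is the full version of this sequence):
 *
 *	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 *	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
 *
 *	spin_lock(&ctrl->mbox_lock);
 *	memset(wrb, 0, sizeof(*wrb));
 *	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 *	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 *			   OPCODE_COMMON_EQ_CREATE, sizeof(*req));
 *	(fill in the request body)
 *	status = be_mbox_notify(ctrl);	(posts the WRB, waits for completion)
 *	spin_unlock(&ctrl->mbox_lock);
 */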
struct be_sge {
u32 pa_lo;
u32 pa_hi;
u32 len;
};
#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
struct be_mcc_wrb {
u32 embedded; /* dword 0 */
u32 payload_length; /* dword 1 */
u32 tag0; /* dword 2 */
u32 tag1; /* dword 3 */
u32 rsvd; /* dword 4 */
union {
u8 embedded_payload[236]; /* used by embedded cmds */
struct be_sge sgl[19]; /* used by non-embedded cmds */
} payload;
};
#define CQE_FLAGS_VALID_MASK (1 << 31)
#define CQE_FLAGS_ASYNC_MASK (1 << 30)
/* Completion Status */
#define MCC_STATUS_SUCCESS 0x0
#define CQE_STATUS_COMPL_MASK 0xFFFF
#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
#define CQE_STATUS_EXTD_MASK 0xFFFF
#define CQE_STATUS_EXTD_SHIFT 0 /* bits 0 - 15 */
struct be_mcc_compl {
u32 status; /* dword 0 */
u32 tag0; /* dword 1 */
u32 tag1; /* dword 2 */
u32 flags; /* dword 3 */
};
/********* Mailbox door bell *************/
/**
* Used for driver communication with the FW.
* The software must write this register twice to post any command. First,
* it writes the register with hi=1 and the upper bits of the physical address
* for the MAILBOX structure. Software must poll the ready bit until this
* is acknowledged. Then, software writes the register with hi=0 with the lower
* bits in the address. It must poll the ready bit until the command is
* complete. Upon completion, the MAILBOX will contain a valid completion
* queue entry.
*/
#define MPU_MAILBOX_DB_OFFSET 0x160
#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
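/*
 * Posting sequence, as implemented by be_mbox_notify() in be_cmds.c:
 *
 *	val  = MPU_MAILBOX_DB_HI_MASK;			(hi = 1)
 *	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
 *	iowrite32(val, db);
 *	be_mbox_db_ready_wait(ctrl);			(poll the ready bit)
 *	val  = (u32)(mbox_mem->dma >> 4) << 2;		(hi = 0, low addr bits)
 *	iowrite32(val, db);
 *	be_mbox_db_ready_wait(ctrl);			(completion now valid
 *							 in mbox_mem->va)
 */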
/********** MPU semaphore ******************/
#define MPU_EP_SEMAPHORE_OFFSET 0xac
#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
#define EP_SEMAPHORE_POST_ERR_MASK 0x1
#define EP_SEMAPHORE_POST_ERR_SHIFT 31
/********** MCC door bell ************/
#define DB_MCCQ_OFFSET 0x140
#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
/* Number of entries posted */
#define DB_MCCQ_NUM_POSTED_SHIFT 16 /* bits 16 - 29 */
/* MPU semaphore POST stage values */
#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
/**
* When the async bit of mcc_compl is set, the last 4 bytes of
* mcc_compl are interpreted as follows:
*/
#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
#define ASYNC_EVENT_CODE_LINK_STATE 0x1
struct be_async_event_trailer {
u32 code;
};
enum {
ASYNC_EVENT_LINK_DOWN = 0x0,
ASYNC_EVENT_LINK_UP = 0x1
};
/**
* When the event code of an async trailer is link-state, the mcc_compl
* must be interpreted as follows
*/
struct be_async_event_link_state {
u8 physical_port;
u8 port_link_status;
u8 port_duplex;
u8 port_speed;
u8 port_fault;
u8 rsvd0[7];
struct be_async_event_trailer trailer;
} __packed;
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_compl compl;
};
/* Type of subsystems supported by FW */
#define CMD_SUBSYSTEM_COMMON 0x1
#define CMD_SUBSYSTEM_ISCSI 0x2
#define CMD_SUBSYSTEM_ETH 0x3
#define CMD_SUBSYSTEM_ISCSI_INI 0x6
#define CMD_COMMON_TCP_UPLOAD 0x1
/**
* List of common opcodes subsystem CMD_SUBSYSTEM_COMMON
* These opcodes are unique for each subsystem defined above
*/
#define OPCODE_COMMON_CQ_CREATE 12
#define OPCODE_COMMON_EQ_CREATE 13
#define OPCODE_COMMON_MCC_CREATE 21
#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
#define OPCODE_COMMON_GET_FW_VERSION 35
#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
#define OPCODE_COMMON_FIRMWARE_CONFIG 42
#define OPCODE_COMMON_MCC_DESTROY 53
#define OPCODE_COMMON_CQ_DESTROY 54
#define OPCODE_COMMON_EQ_DESTROY 55
#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
#define OPCODE_COMMON_FUNCTION_RESET 61
/**
* LIST of opcodes that are common between Initiator and Target
* used by CMD_SUBSYSTEM_ISCSI
* These opcodes are unique for each subsystem defined above
*/
#define OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES 2
#define OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES 3
#define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG 7
#define OPCODE_COMMON_ISCSI_SET_FRAGNUM_BITS_FOR_SGL_CRA 61
#define OPCODE_COMMON_ISCSI_DEFQ_CREATE 64
#define OPCODE_COMMON_ISCSI_DEFQ_DESTROY 65
#define OPCODE_COMMON_ISCSI_WRBQ_CREATE 66
#define OPCODE_COMMON_ISCSI_WRBQ_DESTROY 67
struct be_cmd_req_hdr {
u8 opcode; /* dword 0 */
u8 subsystem; /* dword 0 */
u8 port_number; /* dword 0 */
u8 domain; /* dword 0 */
u32 timeout; /* dword 1 */
u32 request_length; /* dword 2 */
u32 rsvd; /* dword 3 */
};
struct be_cmd_resp_hdr {
u32 info; /* dword 0 */
u32 status; /* dword 1 */
u32 response_length; /* dword 2 */
u32 actual_resp_len; /* dword 3 */
};
struct phys_addr {
u32 lo;
u32 hi;
};
/**************************
* BE Command definitions *
**************************/
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte - used to calculate offset/shift/mask of each field
*/
struct amap_eq_context {
u8 cidx[13]; /* dword 0 */
u8 rsvd0[3]; /* dword 0 */
u8 epidx[13]; /* dword 0 */
u8 valid; /* dword 0 */
u8 rsvd1; /* dword 0 */
u8 size; /* dword 0 */
u8 pidx[13]; /* dword 1 */
u8 rsvd2[3]; /* dword 1 */
u8 pd[10]; /* dword 1 */
u8 count[3]; /* dword 1 */
u8 solevent; /* dword 1 */
u8 stalled; /* dword 1 */
u8 armed; /* dword 1 */
u8 rsvd3[4]; /* dword 2 */
u8 func[8]; /* dword 2 */
u8 rsvd4; /* dword 2 */
u8 delaymult[10]; /* dword 2 */
u8 rsvd5[2]; /* dword 2 */
u8 phase[2]; /* dword 2 */
u8 nodelay; /* dword 2 */
u8 rsvd6[4]; /* dword 2 */
u8 rsvd7[32]; /* dword 3 */
} __packed;
struct be_cmd_req_eq_create {
struct be_cmd_req_hdr hdr; /* dw[4] */
u16 num_pages; /* sword */
u16 rsvd0; /* sword */
u8 context[sizeof(struct amap_eq_context) / 8]; /* dw[4] */
struct phys_addr pages[8];
} __packed;
struct be_cmd_resp_eq_create {
struct be_cmd_resp_hdr resp_hdr;
u16 eq_id; /* sword */
u16 rsvd0; /* sword */
} __packed;
struct mac_addr {
u16 size_of_struct;
u8 addr[ETH_ALEN];
} __packed;
struct be_cmd_req_mac_query {
struct be_cmd_req_hdr hdr;
u8 type;
u8 permanent;
u16 if_id;
} __packed;
struct be_cmd_resp_mac_query {
struct be_cmd_resp_hdr hdr;
struct mac_addr mac;
};
/******************** Create CQ ***************************/
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte - used to calculate offset/shift/mask of each field
*/
struct amap_cq_context {
u8 cidx[11]; /* dword 0 */
u8 rsvd0; /* dword 0 */
u8 coalescwm[2]; /* dword 0 */
u8 nodelay; /* dword 0 */
u8 epidx[11]; /* dword 0 */
u8 rsvd1; /* dword 0 */
u8 count[2]; /* dword 0 */
u8 valid; /* dword 0 */
u8 solevent; /* dword 0 */
u8 eventable; /* dword 0 */
u8 pidx[11]; /* dword 1 */
u8 rsvd2; /* dword 1 */
u8 pd[10]; /* dword 1 */
u8 eqid[8]; /* dword 1 */
u8 stalled; /* dword 1 */
u8 armed; /* dword 1 */
u8 rsvd3[4]; /* dword 2 */
u8 func[8]; /* dword 2 */
u8 rsvd4[20]; /* dword 2 */
u8 rsvd5[32]; /* dword 3 */
} __packed;
struct be_cmd_req_cq_create {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u16 rsvd0;
u8 context[sizeof(struct amap_cq_context) / 8];
struct phys_addr pages[4];
} __packed;
struct be_cmd_resp_cq_create {
struct be_cmd_resp_hdr hdr;
u16 cq_id;
u16 rsvd0;
} __packed;
/******************** Create MCCQ ***************************/
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte - used to calculate offset/shift/mask of each field
*/
struct amap_mcc_context {
u8 con_index[14];
u8 rsvd0[2];
u8 ring_size[4];
u8 fetch_wrb;
u8 fetch_r2t;
u8 cq_id[10];
u8 prod_index[14];
u8 fid[8];
u8 pdid[9];
u8 valid;
u8 rsvd1[32];
u8 rsvd2[32];
} __packed;
struct be_cmd_req_mcc_create {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u16 rsvd0;
u8 context[sizeof(struct amap_mcc_context) / 8];
struct phys_addr pages[8];
} __packed;
struct be_cmd_resp_mcc_create {
struct be_cmd_resp_hdr hdr;
u16 id;
u16 rsvd0;
} __packed;
/******************** Q Destroy ***************************/
/* Type of Queue to be destroyed */
enum {
QTYPE_EQ = 1,
QTYPE_CQ,
QTYPE_MCCQ,
QTYPE_WRBQ,
QTYPE_DPDUQ,
QTYPE_SGL
};
struct be_cmd_req_q_destroy {
struct be_cmd_req_hdr hdr;
u16 id;
u16 bypass_flush; /* valid only for rx q destroy */
} __packed;
struct macaddr {
u8 byte[ETH_ALEN];
};
struct be_cmd_req_mcast_mac_config {
struct be_cmd_req_hdr hdr;
u16 num_mac;
u8 promiscuous;
u8 interface_id;
struct macaddr mac[32];
} __packed;
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
return wrb->payload.embedded_payload;
}
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
return &wrb->payload.sgl[0];
}
/******************** Modify EQ Delay *******************/
struct be_cmd_req_modify_eq_delay {
struct be_cmd_req_hdr hdr;
u32 num_eq;
struct {
u32 eq_id;
u32 phase;
u32 delay_multiplier;
} delay[8];
} __packed;
/******************** Get MAC ADDR *******************/
#define ETH_ALEN 6
struct be_cmd_req_get_mac_addr {
struct be_cmd_req_hdr hdr;
u32 nic_port_count;
u32 speed;
u32 max_speed;
u32 link_state;
u32 max_frame_size;
u16 size_of_structure;
u8 mac_address[ETH_ALEN];
u32 rsvd[23];
};
struct be_cmd_resp_get_mac_addr {
struct be_cmd_resp_hdr hdr;
u32 nic_port_count;
u32 speed;
u32 max_speed;
u32 link_state;
u32 max_frame_size;
u16 size_of_structure;
u8 mac_address[6];
u32 rsvd[23];
};
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *eq, int eq_delay);
int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *cq, struct be_queue_info *eq,
bool sol_evts, bool no_delay,
int num_cqe_dma_coalesce);
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
int type);
int be_poll_mcc(struct be_ctrl_info *ctrl);
unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl);
int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr);
/* iSCSI Functions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
int be_mbox_notify(struct be_ctrl_info *ctrl);
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
struct be_queue_info *cq,
struct be_queue_info *dq, int length,
int entry_size);
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem, u32 page_offset,
u32 num_pages);
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
struct be_queue_info *wrbq);
struct be_default_pdu_context {
u32 dw[4];
} __packed;
struct amap_be_default_pdu_context {
u8 dbuf_cindex[13]; /* dword 0 */
u8 rsvd0[3]; /* dword 0 */
u8 ring_size[4]; /* dword 0 */
u8 ring_state[4]; /* dword 0 */
u8 rsvd1[8]; /* dword 0 */
u8 dbuf_pindex[13]; /* dword 1 */
u8 rsvd2; /* dword 1 */
u8 pci_func_id[8]; /* dword 1 */
u8 rx_pdid[9]; /* dword 1 */
u8 rx_pdid_valid; /* dword 1 */
u8 default_buffer_size[16]; /* dword 2 */
u8 cq_id_recv[10]; /* dword 2 */
u8 rx_pdid_not_valid; /* dword 2 */
u8 rsvd3[5]; /* dword 2 */
u8 rsvd4[32]; /* dword 3 */
} __packed;
struct be_defq_create_req {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u8 ulp_num;
u8 rsvd0;
struct be_default_pdu_context context;
struct phys_addr pages[8];
} __packed;
struct be_defq_create_resp {
struct be_cmd_req_hdr hdr;
u16 id;
u16 rsvd0;
} __packed;
struct be_post_sgl_pages_req {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u16 page_offset;
u32 rsvd0;
struct phys_addr pages[26];
u32 rsvd1;
} __packed;
struct be_wrbq_create_req {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u8 ulp_num;
u8 rsvd0;
struct phys_addr pages[8];
} __packed;
struct be_wrbq_create_resp {
struct be_cmd_resp_hdr resp_hdr;
u16 cid;
u16 rsvd0;
} __packed;
#define SOL_CID_MASK 0x0000FFC0
#define SOL_CODE_MASK 0x0000003F
#define SOL_WRB_INDEX_MASK 0x00FF0000
#define SOL_CMD_WND_MASK 0xFF000000
#define SOL_RES_CNT_MASK 0x7FFFFFFF
#define SOL_EXP_CMD_SN_MASK 0xFFFFFFFF
#define SOL_HW_STS_MASK 0x000000FF
#define SOL_STS_MASK 0x0000FF00
#define SOL_RESP_MASK 0x00FF0000
#define SOL_FLAGS_MASK 0x7F000000
#define SOL_S_MASK 0x80000000
struct sol_cqe {
u32 dw[4];
};
struct amap_sol_cqe {
u8 hw_sts[8]; /* dword 0 */
u8 i_sts[8]; /* dword 0 */
u8 i_resp[8]; /* dword 0 */
u8 i_flags[7]; /* dword 0 */
u8 s; /* dword 0 */
u8 i_exp_cmd_sn[32]; /* dword 1 */
u8 code[6]; /* dword 2 */
u8 cid[10]; /* dword 2 */
u8 wrb_index[8]; /* dword 2 */
u8 i_cmd_wnd[8]; /* dword 2 */
u8 i_res_cnt[31]; /* dword 3 */
u8 valid; /* dword 3 */
} __packed;
/**
* Post WRB Queue Doorbell Register used by the host Storage
* stack to notify the
* controller of a posted Work Request Block
*/
#define DB_WRB_POST_CID_MASK 0x3FF /* bits 0 - 9 */
#define DB_DEF_PDU_WRB_INDEX_MASK 0xFF /* bits 0 - 9 */
#define DB_DEF_PDU_WRB_INDEX_SHIFT 16
#define DB_DEF_PDU_NUM_POSTED_SHIFT 24
struct fragnum_bits_for_sgl_cra_in {
struct be_cmd_req_hdr hdr;
u32 num_bits;
} __packed;
struct iscsi_cleanup_req {
struct be_cmd_req_hdr hdr;
u16 chute;
u8 hdr_ring_id;
u8 data_ring_id;
} __packed;
struct eq_delay {
u32 eq_id;
u32 phase;
u32 delay_multiplier;
} __packed;
struct be_eq_delay_params_in {
struct be_cmd_req_hdr hdr;
u32 num_eq;
struct eq_delay delay[8];
} __packed;
struct ip_address_format {
u16 size_of_structure;
u8 reserved;
u8 ip_type;
u8 ip_address[16];
u32 rsvd0;
} __packed;
struct tcp_connect_and_offload_in {
struct be_cmd_req_hdr hdr;
struct ip_address_format ip_address;
u16 tcp_port;
u16 cid;
u16 cq_id;
u16 defq_id;
struct phys_addr dataout_template_pa;
u16 hdr_ring_id;
u16 data_ring_id;
u8 do_offload;
u8 rsvd0[3];
} __packed;
struct tcp_connect_and_offload_out {
struct be_cmd_resp_hdr hdr;
u32 connection_handle;
u16 cid;
u16 rsvd0;
} __packed;
struct be_mcc_wrb_context {
struct MCC_WRB *wrb;
int *users_final_status;
} __packed;
#define DB_DEF_PDU_RING_ID_MASK 0x3FF /* bits 0 - 9 */
#define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 0 - 9 */
#define DB_DEF_PDU_REARM_SHIFT 14
#define DB_DEF_PDU_EVENT_SHIFT 15
#define DB_DEF_PDU_CQPROC_SHIFT 16
struct dmsg_cqe {
u32 dw[4];
} __packed;
struct tcp_upload_params_in {
struct be_cmd_req_hdr hdr;
u16 id;
u16 upload_type;
u32 reset_seq;
} __packed;
struct tcp_upload_params_out {
u32 dw[32];
} __packed;
union tcp_upload_params {
struct tcp_upload_params_in request;
struct tcp_upload_params_out response;
} __packed;
struct be_ulp_fw_cfg {
u32 ulp_mode;
u32 etx_base;
u32 etx_count;
u32 sq_base;
u32 sq_count;
u32 rq_base;
u32 rq_count;
u32 dq_base;
u32 dq_count;
u32 lro_base;
u32 lro_count;
u32 icd_base;
u32 icd_count;
};
struct be_fw_cfg {
struct be_cmd_req_hdr hdr;
u32 be_config_number;
u32 asic_revision;
u32 phys_port;
u32 function_mode;
struct be_ulp_fw_cfg ulp[2];
u32 function_caps;
} __packed;
#define CMD_ISCSI_COMMAND_INVALIDATE 1
#define ISCSI_OPCODE_SCSI_DATA_OUT 5
#define OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD 70
#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
#define OPCODE_COMMON_ISCSI_CLEANUP 59
#define OPCODE_COMMON_TCP_UPLOAD 56
#define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1
/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
#define CMD_ISCSI_CONNECTION_INVALIDATE 1
#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 2
#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
#define INI_WR_CMD 1 /* Initiator write command */
#define INI_TMF_CMD 2 /* Initiator TMF command */
#define INI_NOPOUT_CMD 3 /* Initiator; Send a NOP-OUT */
#define INI_RD_CMD 5 /* Initiator requesting to send
* a read command
*/
#define TGT_CTX_UPDT_CMD 7 /* Target context update */
#define TGT_STS_CMD 8 /* Target R2T and other BHS
* where only the status number
* need to be updated
*/
#define TGT_DATAIN_CMD 9 /* Target Data-Ins in response
* to read command
*/
#define TGT_SOS_PDU 10 /* Target:standalone status
* response
*/
#define TGT_DM_CMD 11 /* Indicates that the bhs
* prepared by the driver
* should not be touched
*/
/* --- CMD_CHUTE_TYPE --- */
#define CMD_CONNECTION_CHUTE_0 1
#define CMD_CONNECTION_CHUTE_1 2
#define CMD_CONNECTION_CHUTE_2 3
#define EQ_MAJOR_CODE_COMPLETION 0
#define CMD_ISCSI_SESSION_DEL_CFG_FROM_FLASH 0
#define CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH 1
/* --- CONNECTION_UPLOAD_PARAMS --- */
/* These parameters are used to define the type of upload desired. */
#define CONNECTION_UPLOAD_GRACEFUL 1 /* Graceful upload */
#define CONNECTION_UPLOAD_ABORT_RESET 2 /* Abortive upload with
* reset
*/
#define CONNECTION_UPLOAD_ABORT 3 /* Abortive upload without
* reset
*/
#define CONNECTION_UPLOAD_ABORT_WITH_SEQ 4 /* Abortive upload with reset,
* sequence number by driver */
/* Returns byte size of given field within a structure. */
/* Returns the number of items in the field array. */
#define BE_NUMBER_OF_FIELD(_type_, _field_) \
(FIELD_SIZEOF(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))
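/*
 * For instance, be_post_sgl_pages_req (declared above) has
 * "struct phys_addr pages[26]", so
 * BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req, pages) == 26;
 * be_cmd_iscsi_post_sgl_pages() in be_cmds.c uses this to cap the number
 * of pages posted per WRB.
 */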
/**
* Different types of iSCSI completions to the host driver for both
* initiator and target modes of operation.
*/
#define SOL_CMD_COMPLETE 1 /* Solicited command completed
* normally
*/
#define SOL_CMD_KILLED_DATA_DIGEST_ERR 2 /* Solicited command got
* invalidated internally due
* to Data Digest error
*/
#define CXN_KILLED_PDU_SIZE_EXCEEDS_DSL 3 /* Connection got invalidated
* internally
* due to a received PDU
* size > DSL
*/
#define CXN_KILLED_BURST_LEN_MISMATCH 4 /* Connection got invalidated
* internally due to received
* PDU sequence size >
* FBL/MBL.
*/
#define CXN_KILLED_AHS_RCVD 5 /* Connection got invalidated
* internally due to a received
* PDU Hdr that has
* AHS */
#define CXN_KILLED_HDR_DIGEST_ERR 6 /* Connection got invalidated
* internally due to Hdr Digest
* error
*/
#define CXN_KILLED_UNKNOWN_HDR 7 /* Connection got invalidated
* internally
* due to a bad opcode in the
* pdu hdr
*/
#define CXN_KILLED_STALE_ITT_TTT_RCVD 8 /* Connection got invalidated
* internally due to a received
* ITT/TTT that does not belong
* to this Connection
*/
#define CXN_KILLED_INVALID_ITT_TTT_RCVD 9 /* Connection got invalidated
* internally due to received
* ITT/TTT value > Max
* Supported ITTs/TTTs
*/
#define CXN_KILLED_RST_RCVD 10 /* Connection got invalidated
* internally due to an
* incoming TCP RST
*/
#define CXN_KILLED_TIMED_OUT 11 /* Connection got invalidated
* internally due to a timeout on a
* TCP segment; 12 retransmit
* attempts failed
*/
#define CXN_KILLED_RST_SENT 12 /* Connection got invalidated
* internally due to TCP RST
* sent by the Tx side
*/
#define CXN_KILLED_FIN_RCVD 13 /* Connection got invalidated
* internally due to an
* incoming TCP FIN.
*/
#define CXN_KILLED_BAD_UNSOL_PDU_RCVD 14 /* Connection got invalidated
* internally due to a bad
* unsolicited PDU; unsolicited
* PDUs are PDUs with
* ITT=0xffffffff
*/
#define CXN_KILLED_BAD_WRB_INDEX_ERROR 15 /* Connection got invalidated
* internally due to bad WRB
* index.
*/
#define CXN_KILLED_OVER_RUN_RESIDUAL 16 /* Command got invalidated
* internally because the received
* command has residual
* overrun bytes.
*/
#define CXN_KILLED_UNDER_RUN_RESIDUAL 17 /* Command got invalidated
* internally because the received
* command has residual
* underrun bytes.
*/
#define CMD_KILLED_INVALID_STATSN_RCVD 18 /* Command got invalidated
* internally due to a received
* PDU has an invalid StatusSN
*/
#define CMD_KILLED_INVALID_R2T_RCVD 19 /* Command got invalidated
* internally due to receiving
* an R2T with some invalid
* fields in it
*/
#define CMD_CXN_KILLED_LUN_INVALID 20 /* Command got invalidated
* internally due to received
* PDU has an invalid LUN.
*/
#define CMD_CXN_KILLED_ICD_INVALID 21 /* Command got invalidated
* internally due to the
* corresponding ICD not in a
* valid state
*/
#define CMD_CXN_KILLED_ITT_INVALID 22 /* Command got invalidated due
* to received PDU has an
* invalid ITT.
*/
#define CMD_CXN_KILLED_SEQ_OUTOFORDER 23 /* Command got invalidated due
* to received sequence buffer
* offset is out of order.
*/
#define CMD_CXN_KILLED_INVALID_DATASN_RCVD 24 /* Command got invalidated
* internally due to a
* received PDU has an invalid
* DataSN
*/
#define CXN_INVALIDATE_NOTIFY 25 /* Connection invalidation
* completion notify.
*/
#define CXN_INVALIDATE_INDEX_NOTIFY 26 /* Connection invalidation
* completion
* with data PDU index.
*/
#define CMD_INVALIDATED_NOTIFY 27 /* Command invalidation
* completion notify.
*/
#define UNSOL_HDR_NOTIFY 28 /* Unsolicited header notify.*/
#define UNSOL_DATA_NOTIFY 29 /* Unsolicited data notify.*/
#define UNSOL_DATA_DIGEST_ERROR_NOTIFY 30 /* Unsolicited data digest
* error notify.
*/
#define DRIVERMSG_NOTIFY 31 /* TCP acknowledge based
* notification.
*/
#define CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN 32 /* Connection got invalidated
* internally due to command
* and data are not on same
* connection.
*/
#define SOL_CMD_KILLED_DIF_ERR 33 /* Solicited command got
* invalidated internally due
* to DIF error
*/
#define CXN_KILLED_SYN_RCVD 34 /* Connection got invalidated
* internally due to incoming
* TCP SYN
*/
#define CXN_KILLED_IMM_DATA_RCVD 35 /* Connection got invalidated
* internally due to an
* incoming Unsolicited PDU
* that has immediate data on
* the cxn
*/
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
bool embedded, u8 sge_cnt);
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
u8 subsystem, u8 opcode, int cmd_len);
#endif /* !BEISCSI_CMDS_H */
/**
* Copyright (C) 2005 - 2009 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*
*/
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_iscsi.h"
extern struct iscsi_transport beiscsi_iscsi_transport;
/**
* beiscsi_session_create - creates a new iscsi session
* @ep: iscsi endpoint to be used for the session
* @cmds_max: max commands supported
* @qdepth: max queue depth supported
* @initial_cmdsn: initial iscsi CMDSN
*/
struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
u16 cmds_max,
u16 qdepth,
u32 initial_cmdsn)
{
struct Scsi_Host *shost;
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_cls_session *cls_session;
struct iscsi_session *sess;
struct beiscsi_hba *phba;
struct iscsi_task *task;
struct beiscsi_io_task *io_task;
unsigned int max_size, num_cmd;
dma_addr_t bus_add;
u64 pa_addr;
void *vaddr;
SE_DEBUG(DBG_LVL_8, "In beiscsi_session_create\n");
if (!ep) {
SE_DEBUG(DBG_LVL_1, "beiscsi_session_create: invalid ep \n");
return NULL;
}
beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba;
shost = phba->shost;
if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
shost_printk(KERN_ERR, shost, "Cannot handle %d cmds."
"Max cmds per session supported is %d. Using %d. "
"\n", cmds_max,
beiscsi_ep->phba->params.wrbs_per_cxn,
beiscsi_ep->phba->params.wrbs_per_cxn);
cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
}
cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
shost, cmds_max,
sizeof(struct beiscsi_io_task),
initial_cmdsn, ISCSI_MAX_TARGET);
if (!cls_session)
return NULL;
sess = cls_session->dd_data;
max_size = ALIGN(sizeof(struct be_cmd_bhs), 64) * sess->cmds_max;
vaddr = pci_alloc_consistent(phba->pcidev, max_size, &bus_add);
pa_addr = (__u64) bus_add;
for (num_cmd = 0; num_cmd < sess->cmds_max; num_cmd++) {
task = sess->cmds[num_cmd];
io_task = task->dd_data;
io_task->cmd_bhs = vaddr;
io_task->bhs_pa.u.a64.address = pa_addr;
io_task->alloc_size = max_size;
vaddr += ALIGN(sizeof(struct be_cmd_bhs), 64);
pa_addr += ALIGN(sizeof(struct be_cmd_bhs), 64);
}
return cls_session;
}
/**
* beiscsi_session_destroy - destroys iscsi session
* @cls_session: pointer to iscsi cls session
*
* Destroys iSCSI session instance and releases
* resources allocated for it.
*/
void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
{
struct iscsi_task *task;
struct beiscsi_io_task *io_task;
struct iscsi_session *sess = cls_session->dd_data;
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
task = sess->cmds[0];
io_task = task->dd_data;
pci_free_consistent(phba->pcidev,
io_task->alloc_size,
io_task->cmd_bhs,
io_task->bhs_pa.u.a64.address);
iscsi_session_teardown(cls_session);
}
/**
* beiscsi_conn_create - create an instance of iscsi connection
* @cls_session: ptr to iscsi_cls_session
* @cid: iscsi cid
*/
struct iscsi_cls_conn *
beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid)
{
struct beiscsi_hba *phba;
struct Scsi_Host *shost;
struct iscsi_cls_conn *cls_conn;
struct beiscsi_conn *beiscsi_conn;
struct iscsi_conn *conn;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_create ,cid"
"from iscsi layer=%d\n", cid);
shost = iscsi_session_to_shost(cls_session);
phba = iscsi_host_priv(shost);
cls_conn = iscsi_conn_setup(cls_session, sizeof(*beiscsi_conn), cid);
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
beiscsi_conn = conn->dd_data;
beiscsi_conn->ep = NULL;
beiscsi_conn->phba = phba;
beiscsi_conn->conn = conn;
return cls_conn;
}
/**
* beiscsi_bindconn_cid - Bind the beiscsi_conn with phba connection table
* @beiscsi_conn: The pointer to beiscsi_conn structure
* @phba: The phba instance
* @cid: The cid to bind
*/
static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
struct beiscsi_conn *beiscsi_conn,
unsigned int cid)
{
if (phba->conn_table[cid]) {
SE_DEBUG(DBG_LVL_1,
"Connection table already occupied. Detected clash\n");
return -EINVAL;
} else {
SE_DEBUG(DBG_LVL_8, "phba->conn_table[%d]=%p(beiscsi_conn) \n",
cid, beiscsi_conn);
phba->conn_table[cid] = beiscsi_conn;
}
return 0;
}
/**
* beiscsi_conn_bind - Binds iscsi session/connection with TCP connection
* @cls_session: pointer to iscsi cls session
* @cls_conn: pointer to iscsi cls conn
* @transport_fd: EP handle(64 bit)
*
* This function binds the TCP Conn with iSCSI Connection and Session.
*/
int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn,
u64 transport_fd, int is_leading)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
struct Scsi_Host *shost =
(struct Scsi_Host *)iscsi_session_to_shost(cls_session);
struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_bind\n");
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
return -EINVAL;
beiscsi_ep = ep->dd_data;
if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
if (beiscsi_ep->phba != phba) {
SE_DEBUG(DBG_LVL_8,
"beiscsi_ep->hba=%p not equal to phba=%p \n",
beiscsi_ep->phba, phba);
return -EEXIST;
}
beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
beiscsi_conn->ep = beiscsi_ep;
beiscsi_ep->conn = beiscsi_conn;
SE_DEBUG(DBG_LVL_8, "beiscsi_conn=%p conn=%p ep_cid=%d \n",
beiscsi_conn, conn, beiscsi_ep->ep_cid);
return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
}
/**
* beiscsi_conn_get_param - get the iscsi parameter
* @cls_conn: pointer to iscsi cls conn
* @param: parameter type identifier
* @buf: buffer pointer
*
* returns iscsi parameter
*/
int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_conn *conn = cls_conn->dd_data;
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
int len = 0;
beiscsi_ep = beiscsi_conn->ep;
if (!beiscsi_ep) {
SE_DEBUG(DBG_LVL_1,
"In beiscsi_conn_get_param , no beiscsi_ep\n");
return -1;
}
switch (param) {
case ISCSI_PARAM_CONN_PORT:
len = sprintf(buf, "%hu\n", beiscsi_ep->dst_tcpport);
break;
case ISCSI_PARAM_CONN_ADDRESS:
if (beiscsi_ep->ip_type == BE2_IPV4)
len = sprintf(buf, "%pI4\n", &beiscsi_ep->dst_addr);
else
len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
break;
default:
return iscsi_conn_get_param(cls_conn, param, buf);
}
return len;
}
int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf, int buflen)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
int ret;
ret = iscsi_set_param(cls_conn, param, buf, buflen);
if (ret)
return ret;
/*
* If userspace tried to set the value higher than we can
* support, override it here.
*/
switch (param) {
case ISCSI_PARAM_FIRST_BURST:
if (session->first_burst > 8192)
session->first_burst = 8192;
break;
case ISCSI_PARAM_MAX_RECV_DLENGTH:
if (conn->max_recv_dlength > 65536)
conn->max_recv_dlength = 65536;
break;
case ISCSI_PARAM_MAX_BURST:
if (session->max_burst > 262144)
session->max_burst = 262144;
break;
default:
return 0;
}
return 0;
}
/**
* beiscsi_get_host_param - get the iscsi parameter
* @shost: pointer to scsi_host structure
* @param: parameter type identifier
* @buf: buffer pointer
*
* returns host parameter
*/
int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
{
struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
int len = 0;
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
be_cmd_get_mac_addr(&phba->ctrl, phba->mac_address);
len = sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
break;
default:
return iscsi_host_get_param(shost, param, buf);
}
return len;
}
/**
* beiscsi_conn_get_stats - get the iscsi stats
* @cls_conn: pointer to iscsi cls conn
* @stats: pointer to iscsi_stats structure
*
* returns iscsi stats
*/
void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
struct iscsi_stats *stats)
{
struct iscsi_conn *conn = cls_conn->dd_data;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_stats\n");
stats->txdata_octets = conn->txdata_octets;
stats->rxdata_octets = conn->rxdata_octets;
stats->dataout_pdus = conn->dataout_pdus_cnt;
stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
stats->datain_pdus = conn->datain_pdus_cnt;
stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
stats->r2t_pdus = conn->r2t_pdus_cnt;
stats->digest_err = 0;
stats->timeout_err = 0;
stats->custom_length = 0;
strcpy(stats->custom[0].desc, "eh_abort_cnt");
stats->custom[0].value = conn->eh_abort_cnt;
}
/**
* beiscsi_set_params_for_offld - set the connection parameters for offload
* @beiscsi_conn: pointer to beiscsi_conn
* @params: pointer to offload_params structure
*/
static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_offload_params *params)
{
struct iscsi_conn *conn = beiscsi_conn->conn;
struct iscsi_session *session = conn->session;
AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_burst_length,
params, session->max_burst);
AMAP_SET_BITS(struct amap_beiscsi_offload_params,
max_send_data_segment_length, params,
conn->max_xmit_dlength);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, first_burst_length,
params, session->first_burst);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, erl, params,
session->erl);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, dde, params,
conn->datadgst_en);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, hde, params,
conn->hdrdgst_en);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, ir2t, params,
session->initial_r2t_en);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, imd, params,
session->imm_data_en);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,
(conn->exp_statsn - 1));
}
/**
* beiscsi_conn_start - offload of session to chip
* @cls_conn: pointer to iscsi cls conn
*/
int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_offload_params params;
struct iscsi_session *session = conn->session;
struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
memset(&params, 0, sizeof(struct beiscsi_offload_params));
beiscsi_ep = beiscsi_conn->ep;
if (!beiscsi_ep)
SE_DEBUG(DBG_LVL_1, "In beiscsi_conn_start , no beiscsi_ep\n");
free_mgmt_sgl_handle(phba, beiscsi_conn->plogin_sgl_handle);
beiscsi_conn->login_in_progress = 0;
beiscsi_set_params_for_offld(beiscsi_conn, &params);
beiscsi_offload_connection(beiscsi_conn, &params);
iscsi_conn_start(cls_conn);
return 0;
}
/**
* beiscsi_get_cid - Allocate a cid
* @phba: The phba instance
*/
static int beiscsi_get_cid(struct beiscsi_hba *phba)
{
unsigned short cid = 0xFFFF;
if (!phba->avlbl_cids)
return cid;
cid = phba->cid_array[phba->cid_alloc++];
if (phba->cid_alloc == phba->params.cxns_per_ctrl)
phba->cid_alloc = 0;
phba->avlbl_cids--;
return cid;
}
/**
* beiscsi_open_conn - Ask FW to open a TCP connection
* @ep: endpoint to be used
* @src_addr: The source IP address
* @dst_addr: The destination IP address
* @non_blocking: blocking or non-blocking call
*
* Asks the FW to open a TCP connection
*/
static int beiscsi_open_conn(struct iscsi_endpoint *ep,
struct sockaddr *src_addr,
struct sockaddr *dst_addr, int non_blocking)
{
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
struct beiscsi_hba *phba = beiscsi_ep->phba;
int ret = -1;
beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
if (beiscsi_ep->ep_cid == 0xFFFF) {
SE_DEBUG(DBG_LVL_1, "No free cid available\n");
return ret;
}
SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ",
beiscsi_ep->ep_cid);
phba->ep_array[beiscsi_ep->ep_cid] = ep;
if (beiscsi_ep->ep_cid >
(phba->fw_config.iscsi_cid_start + phba->params.cxns_per_ctrl)) {
SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
return ret;
}
beiscsi_ep->cid_vld = 0;
return mgmt_open_connection(phba, dst_addr, beiscsi_ep);
}
/**
* beiscsi_put_cid - Free the cid
* @phba: The phba for which the cid is being freed
* @cid: The cid to free
*/
static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
{
phba->avlbl_cids++;
phba->cid_array[phba->cid_free++] = cid;
if (phba->cid_free == phba->params.cxns_per_ctrl)
phba->cid_free = 0;
}
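/*
 * beiscsi_get_cid()/beiscsi_put_cid() treat cid_array[] as a circular free
 * list: cid_alloc and cid_free chase each other around the array and
 * avlbl_cids tracks how many entries remain, assuming cid_array[] was
 * seeded with cxns_per_ctrl valid cids at init time.
 */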
/**
* beiscsi_free_ep - free endpoint
* @ep: pointer to iscsi endpoint structure
*/
static void beiscsi_free_ep(struct iscsi_endpoint *ep)
{
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
struct beiscsi_hba *phba = beiscsi_ep->phba;
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
beiscsi_ep->phba = NULL;
iscsi_destroy_endpoint(ep);
}
/**
* beiscsi_ep_connect - Ask chip to create TCP Conn
* @shost: Pointer to the Scsi_Host structure
* @dst_addr: The IP address of the target
* @non_blocking: blocking or non-blocking call
*
* This routine first allocates an endpoint and then asks the chip to
* open a TCP connection to the target.
*/
struct iscsi_endpoint *
beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
{
struct beiscsi_hba *phba;
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
int ret;
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_connect \n");
if (shost)
phba = iscsi_host_priv(shost);
else {
ret = -ENXIO;
SE_DEBUG(DBG_LVL_1, "shost is NULL \n");
return ERR_PTR(ret);
}
ep = iscsi_create_endpoint(sizeof(struct beiscsi_endpoint));
if (!ep) {
ret = -ENOMEM;
return ERR_PTR(ret);
}
beiscsi_ep = ep->dd_data;
beiscsi_ep->phba = phba;
if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) {
SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
ret = -ENOMEM;
goto free_ep;
}
return ep;
free_ep:
beiscsi_free_ep(ep);
return ERR_PTR(ret);
}
/**
* beiscsi_ep_poll - Poll to see if connection is established
* @ep: endpoint to be used
* @timeout_ms: timeout specified in millisecs
*
* Poll to see if TCP connection established
*/
int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_poll\n");
if (beiscsi_ep->cid_vld == 1)
return 1;
else
return 0;
}
/**
* beiscsi_close_conn - Upload the connection
* @ep: The iscsi endpoint
* @flag: The type of connection closure
*/
static int beiscsi_close_conn(struct iscsi_endpoint *ep, int flag)
{
int ret = 0;
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
struct beiscsi_hba *phba = beiscsi_ep->phba;
if (MGMT_STATUS_SUCCESS !=
mgmt_upload_connection(phba, beiscsi_ep->ep_cid,
CONNECTION_UPLOAD_GRACEFUL)) {
SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x",
beiscsi_ep->ep_cid);
ret = -1;
}
return ret;
}
/**
* beiscsi_ep_disconnect - Tears down the TCP connection
* @ep: endpoint to be used
*
* Tears down the TCP connection
*/
void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
{
struct beiscsi_conn *beiscsi_conn;
struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_hba *phba;
int flag = 0;
beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba;
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect\n");
if (beiscsi_ep->conn) {
beiscsi_conn = beiscsi_ep->conn;
iscsi_suspend_queue(beiscsi_conn->conn);
beiscsi_close_conn(ep, flag);
}
beiscsi_free_ep(ep);
}
/**
* beiscsi_unbind_conn_to_cid - Unbind the beiscsi_conn from phba conn table
* @phba: The phba instance
* @cid: The cid whose connection binding is removed
*/
static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
unsigned int cid)
{
if (phba->conn_table[cid])
phba->conn_table[cid] = NULL;
else {
SE_DEBUG(DBG_LVL_8, "Connection table Not occupied. \n");
return -EINVAL;
}
return 0;
}
/**
* beiscsi_conn_stop - Invalidate and stop the connection
* @cls_conn: pointer to get iscsi_conn
* @flag: The type of connection closure
*/
void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_session *session = conn->session;
struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
unsigned int status;
unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop\n");
beiscsi_ep = beiscsi_conn->ep;
if (!beiscsi_ep) {
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n");
return;
}
status = mgmt_invalidate_connection(phba, beiscsi_ep,
beiscsi_ep->ep_cid, 1,
savecfg_flag);
if (status != MGMT_STATUS_SUCCESS) {
SE_DEBUG(DBG_LVL_1,
"mgmt_invalidate_connection Failed for cid=%d \n",
beiscsi_ep->ep_cid);
}
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
iscsi_conn_stop(cls_conn, flag);
}
/**
* Copyright (C) 2005 - 2009 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*
*/
#ifndef _BE_ISCSI_
#define _BE_ISCSI_
#include "be_main.h"
#include "be_mgmt.h"
#define BE2_IPV4 0x1
#define BE2_IPV6 0x10
void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_offload_params *params);
void beiscsi_offload_iscsi(struct beiscsi_hba *phba, struct iscsi_conn *conn,
struct beiscsi_conn *beiscsi_conn,
unsigned int fw_handle);
struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
uint16_t cmds_max,
uint16_t qdepth,
uint32_t initial_cmdsn);
void beiscsi_session_destroy(struct iscsi_cls_session *cls_session);
struct iscsi_cls_conn *beiscsi_conn_create(struct iscsi_cls_session
*cls_session, uint32_t cid);
int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn,
uint64_t transport_fd, int is_leading);
int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf);
int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf);
int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf, int buflen);
int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn);
void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag);
struct iscsi_endpoint *beiscsi_ep_connect(struct Scsi_Host *shost,
struct sockaddr *dst_addr,
int non_blocking);
int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
void beiscsi_ep_disconnect(struct iscsi_endpoint *ep);
void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
struct iscsi_stats *stats);
#endif
/**
* Copyright (C) 2005 - 2009 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*
*/
#ifndef _BEISCSI_MAIN_
#define _BEISCSI_MAIN_
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/in.h>
#include <linux/blk-iopoll.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/iscsi_proto.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include "be.h"
#define DRV_NAME "be2iscsi"
#define BUILD_STR "2.0.527.0"
#define BE_NAME "ServerEngines BladeEngine2" \
"Linux iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
#define BE_VENDOR_ID 0x19A2
#define BE_DEVICE_ID1 0x212
#define OC_DEVICE_ID1 0x702
#define OC_DEVICE_ID2 0x703
#define BE2_MAX_SESSIONS 64
#define BE2_CMDS_PER_CXN 128
#define BE2_LOGOUTS BE2_MAX_SESSIONS
#define BE2_TMFS 16
#define BE2_NOPOUT_REQ 16
#define BE2_ASYNCPDUS BE2_MAX_SESSIONS
#define BE2_MAX_ICDS 2048
#define BE2_SGE 32
#define BE2_DEFPDU_HDR_SZ 64
#define BE2_DEFPDU_DATA_SZ 8192
#define BE2_IO_DEPTH \
(BE2_MAX_ICDS / 2 - (BE2_LOGOUTS + BE2_TMFS + BE2_NOPOUT_REQ))
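/*
 * With the defaults above, BE2_IO_DEPTH works out to
 * 2048 / 2 - (64 + 16 + 16) = 928 outstanding I/Os per controller.
 */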
#define BEISCSI_SGLIST_ELEMENTS BE2_SGE
#define BEISCSI_MAX_CMNDS 1024 /* Max IO's per Ctrlr sht->can_queue */
#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */
#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
#define BEISCSI_NUM_DEVICES_SUPPORTED 0x01
#define BEISCSI_MAX_FRAGS_INIT 192
#define BE_NUM_MSIX_ENTRIES 1
#define MPU_EP_SEMAPHORE 0xac
#define BE_SENSE_INFO_SIZE 258
#define BE_ISCSI_PDU_HEADER_SIZE 64
#define BE_MIN_MEM_SIZE 16384
#define IIOC_SCSI_DATA 0x05 /* Write Operation */
#define DBG_LVL 0x00000001
#define DBG_LVL_1 0x00000001
#define DBG_LVL_2 0x00000002
#define DBG_LVL_3 0x00000004
#define DBG_LVL_4 0x00000008
#define DBG_LVL_5 0x00000010
#define DBG_LVL_6 0x00000020
#define DBG_LVL_7 0x00000040
#define DBG_LVL_8 0x00000080
#define SE_DEBUG(debug_mask, fmt, args...) \
do { \
if (debug_mask & DBG_LVL) { \
printk(KERN_ERR "(%s():%d):", __func__, __LINE__);\
printk(fmt, ##args); \
} \
} while (0)
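/*
 * Illustrative usage: with DBG_LVL fixed at 0x00000001 above, only messages
 * passed with DBG_LVL_1 are printed; e.g.
 *	SE_DEBUG(DBG_LVL_1, "No free cid available\n");
 * produces output, while SE_DEBUG(DBG_LVL_8, ...) calls are compiled in but
 * masked out at run time.
 */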
/**
* The hardware needs the async PDU buffers to be posted in multiples of 8,
* so have at least 8 of them by default.
*/
#define HWI_GET_ASYNC_PDU_CTX(phwi) (phwi->phwi_ctxt->pasync_ctx)
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
/**
* Host Interrupt Enable: if set, interrupts are enabled, although "PCI
* Interrupt Disable" and the individual interrupt masks may still block
* them.  This gives the device driver a way to block all interrupts
* atomically without having to arbitrate for the PCI Interrupt Disable
* bit with the OS.
*/
#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
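/*
 * Minimal sketch (not part of the driver as merged; the helper name is
 * hypothetical): toggling host interrupt delivery is a read-modify-write
 * of the membar control register using the offset and mask defined above.
 */
static inline void beiscsi_hostintr_set(u8 __iomem *pci_va, bool enable)
{
	u32 reg = readl(pci_va + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);

	if (enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	writel(reg, pci_va + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
}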
/********* ISR0 Register offset **********/
#define CEV_ISR0_OFFSET 0xC18
#define CEV_ISR_SIZE 4
/**
* Register offsets used when accessing protection domain or CSR registers
* in BladeEngine.
*/
#define DB_TXULP0_OFFSET 0x40
#define DB_RXULP0_OFFSET 0xA0
/********* Event Q door bell *************/
#define DB_EQ_OFFSET DB_CQ_OFFSET
#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
/* Clear the interrupt for this eq */
#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
/* Must be 1 */
#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
/* Number of event entries processed */
#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
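/*
 * Minimal sketch (hypothetical helper, assuming db_va maps the doorbell
 * BAR): ringing the event queue doorbell composes the fields defined above
 * into one 32-bit write.
 */
static inline void beiscsi_ring_eq_db(u8 __iomem *db_va, u32 qid,
				      u32 num_popped, bool rearm, bool clear_intr)
{
	u32 val = qid & DB_EQ_RING_ID_MASK;

	val |= 1 << DB_EQ_EVNT_SHIFT;	/* this ring is an EQ, not a CQ */
	if (clear_intr)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	writel(val, db_va + DB_EQ_OFFSET);
}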
/********* Compl Q door bell *************/
#define DB_CQ_OFFSET 0x120
#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
/* Number of event entries processed */
#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
#define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr)
#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\
(GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data.id)
#define HWI_GET_DEF_HDRQ_ID(pc) (((struct hwi_controller *)\
(GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr.id)
#define PAGES_REQUIRED(x) \
((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE))
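/* e.g. with 4K pages, PAGES_REQUIRED(0x2100) evaluates to 3 */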
enum be_mem_enum {
HWI_MEM_ADDN_CONTEXT,
HWI_MEM_CQ,
HWI_MEM_EQ,
HWI_MEM_WRB,
HWI_MEM_WRBH,
HWI_MEM_SGLH, /* 5 */
HWI_MEM_SGE,
HWI_MEM_ASYNC_HEADER_BUF,
HWI_MEM_ASYNC_DATA_BUF,
HWI_MEM_ASYNC_HEADER_RING,
HWI_MEM_ASYNC_DATA_RING, /* 10 */
HWI_MEM_ASYNC_HEADER_HANDLE,
HWI_MEM_ASYNC_DATA_HANDLE,
HWI_MEM_ASYNC_PDU_CONTEXT,
ISCSI_MEM_GLOBAL_HEADER,
SE_MEM_MAX /* 15 */
};
struct be_bus_address32 {
unsigned int address_lo;
unsigned int address_hi;
};
struct be_bus_address64 {
unsigned long long address;
};
struct be_bus_address {
union {
struct be_bus_address32 a32;
struct be_bus_address64 a64;
} u;
};
struct mem_array {
struct be_bus_address bus_address; /* Bus address of location */
void *virtual_address; /* virtual address of the location */
unsigned int size; /* Size required by memory block */
};
struct be_mem_descriptor {
unsigned int index; /* Index of this memory parameter */
unsigned int category; /* type indicates cached/non-cached */
unsigned int num_elements; /* number of elements in this
* descriptor
*/
unsigned int alignment_mask; /* Alignment mask for this block */
unsigned int size_in_bytes; /* Size required by memory block */
struct mem_array *mem_array;
};
struct sgl_handle {
unsigned int sgl_index;
struct iscsi_sge *pfrag;
};
struct hba_parameters {
unsigned int ios_per_ctrl;
unsigned int cxns_per_ctrl;
unsigned int asyncpdus_per_ctrl;
unsigned int icds_per_ctrl;
unsigned int num_sge_per_io;
unsigned int defpdu_hdr_sz;
unsigned int defpdu_data_sz;
unsigned int num_cq_entries;
unsigned int num_eq_entries;
unsigned int wrbs_per_cxn;
unsigned int crashmode;
unsigned int hba_num;
unsigned int mgmt_ws_sz;
unsigned int hwi_ws_sz;
unsigned int eto;
unsigned int ldto;
unsigned int dbg_flags;
unsigned int num_cxn;
unsigned int eq_timer;
/**
* These are calculated from other params. They're here
* for debug purposes
*/
unsigned int num_mcc_pages;
unsigned int num_mcc_cq_pages;
unsigned int num_cq_pages;
unsigned int num_eq_pages;
unsigned int num_async_pdu_buf_pages;
unsigned int num_async_pdu_buf_sgl_pages;
unsigned int num_async_pdu_buf_cq_pages;
unsigned int num_async_pdu_hdr_pages;
unsigned int num_async_pdu_hdr_sgl_pages;
unsigned int num_async_pdu_hdr_cq_pages;
unsigned int num_sge;
};
struct beiscsi_hba {
struct hba_parameters params;
struct hwi_controller *phwi_ctrlr;
unsigned int mem_req[SE_MEM_MAX];
/* PCI BAR mapped addresses */
u8 __iomem *csr_va; /* CSR */
u8 __iomem *db_va; /* Door Bell */
u8 __iomem *pci_va; /* PCI Config */
struct be_bus_address csr_pa; /* CSR */
struct be_bus_address db_pa; /* Door Bell */
struct be_bus_address pci_pa; /* PCI Config */
/* PCI representation of our HBA */
struct pci_dev *pcidev;
unsigned int state;
unsigned short asic_revision;
struct blk_iopoll iopoll;
struct be_mem_descriptor *init_mem;
unsigned short io_sgl_alloc_index;
unsigned short io_sgl_free_index;
unsigned short io_sgl_hndl_avbl;
struct sgl_handle **io_sgl_hndl_base;
unsigned short eh_sgl_alloc_index;
unsigned short eh_sgl_free_index;
unsigned short eh_sgl_hndl_avbl;
struct sgl_handle **eh_sgl_hndl_base;
spinlock_t io_sgl_lock;
spinlock_t mgmt_sgl_lock;
spinlock_t isr_lock;
unsigned int age;
unsigned short avlbl_cids;
unsigned short cid_alloc;
unsigned short cid_free;
struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2];
struct list_head hba_queue;
unsigned short *cid_array;
struct iscsi_endpoint **ep_array;
struct Scsi_Host *shost;
struct {
/**
* group together since they are used most frequently
* for cid to cri conversion
*/
unsigned int iscsi_cid_start;
unsigned int phys_port;
unsigned int isr_offset;
unsigned int iscsi_icd_start;
unsigned int iscsi_cid_count;
unsigned int iscsi_icd_count;
unsigned int pci_function;
unsigned short cid_alloc;
unsigned short cid_free;
unsigned short avlbl_cids;
spinlock_t cid_lock;
} fw_config;
u8 mac_address[ETH_ALEN];
unsigned short todo_cq;
unsigned short todo_mcc_cq;
char wq_name[20];
struct workqueue_struct *wq; /* The actual work queue */
struct work_struct work_cqs; /* The work being queued */
struct be_ctrl_info ctrl;
};
/**
* struct beiscsi_conn - iscsi connection structure
*/
struct beiscsi_conn {
struct iscsi_conn *conn;
struct beiscsi_hba *phba;
u32 exp_statsn;
u32 beiscsi_conn_cid;
struct beiscsi_endpoint *ep;
unsigned short login_in_progress;
struct sgl_handle *plogin_sgl_handle;
};
/* This structure is used by the chip */
struct pdu_data_out {
u32 dw[12];
};
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
struct amap_pdu_data_out {
u8 opcode[6]; /* opcode */
u8 rsvd0[2]; /* should be 0 */
u8 rsvd1[7];
u8 final_bit; /* F bit */
u8 rsvd2[16];
u8 ahs_length[8]; /* no AHS */
u8 data_len_hi[8];
u8 data_len_lo[16]; /* DataSegmentLength */
u8 lun[64];
u8 itt[32]; /* ITT; initiator task tag */
u8 ttt[32]; /* TTT; valid for R2T or 0xffffffff */
u8 rsvd3[32];
u8 exp_stat_sn[32];
u8 rsvd4[32];
u8 data_sn[32];
u8 buffer_offset[32];
u8 rsvd5[32];
};
struct be_cmd_bhs {
struct iscsi_cmd iscsi_hdr;
unsigned char pad1[16];
struct pdu_data_out iscsi_data_pdu;
unsigned char pad2[BE_SENSE_INFO_SIZE -
sizeof(struct pdu_data_out)];
};
struct beiscsi_io_task {
struct wrb_handle *pwrb_handle;
struct sgl_handle *psgl_handle;
struct beiscsi_conn *conn;
struct scsi_cmnd *scsi_cmnd;
unsigned int cmd_sn;
unsigned int flags;
unsigned short cid;
unsigned short header_len;
unsigned int alloc_size;
struct be_cmd_bhs *cmd_bhs;
struct be_bus_address bhs_pa;
unsigned short bhs_len;
};
struct be_nonio_bhs {
struct iscsi_hdr iscsi_hdr;
unsigned char pad1[16];
struct pdu_data_out iscsi_data_pdu;
unsigned char pad2[BE_SENSE_INFO_SIZE -
sizeof(struct pdu_data_out)];
};
struct be_status_bhs {
struct iscsi_cmd iscsi_hdr;
unsigned char pad1[16];
/**
* BE_SENSE_INFO_SIZE below includes 2 extra bytes to hold the sense info
* length that gets DMA'ed by RxULP
*/
unsigned char sense_info[BE_SENSE_INFO_SIZE];
};
struct iscsi_sge {
u32 dw[4];
};
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
struct amap_iscsi_sge {
u8 addr_hi[32];
u8 addr_lo[32];
u8 sge_offset[22]; /* DWORD 2 */
u8 rsvd0[9]; /* DWORD 2 */
u8 last_sge; /* DWORD 2 */
u8 len[17]; /* DWORD 3 */
u8 rsvd1[15]; /* DWORD 3 */
};
struct beiscsi_offload_params {
u32 dw[5];
};
#define OFFLD_PARAMS_ERL 0x00000003
#define OFFLD_PARAMS_DDE 0x00000004
#define OFFLD_PARAMS_HDE 0x00000008
#define OFFLD_PARAMS_IR2T 0x00000010
#define OFFLD_PARAMS_IMD 0x00000020
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
struct amap_beiscsi_offload_params {
u8 max_burst_length[32];
u8 max_send_data_segment_length[32];
u8 first_burst_length[32];
u8 erl[2];
u8 dde[1];
u8 hde[1];
u8 ir2t[1];
u8 imd[1];
u8 pad[26];
u8 exp_statsn[32];
};
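/*
 * Usage note: because every bit of the wire structure is declared as one
 * byte in the pseudo definition above, offsetof()/sizeof() on this struct
 * yield the bit offset and bit width of each field.
 * beiscsi_set_params_for_offld() relies on this, e.g.
 *	AMAP_SET_BITS(struct amap_beiscsi_offload_params, erl, params,
 *		      session->erl);
 * packs session->erl into the two "erl" bits of params->dw[3].
 */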
/* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_hba *phba, struct sol_cqe *psol);*/
struct async_pdu_handle {
struct list_head link;
struct be_bus_address pa;
void *pbuffer;
unsigned int consumed;
unsigned char index;
unsigned char is_header;
unsigned short cri;
unsigned long buffer_len;
};
struct hwi_async_entry {
struct {
unsigned char hdr_received;
unsigned char hdr_len;
unsigned short bytes_received;
unsigned int bytes_needed;
struct list_head list;
} wait_queue;
struct list_head header_busy_list;
struct list_head data_busy_list;
};
#define BE_MIN_ASYNC_ENTRIES 128
struct hwi_async_pdu_context {
struct {
struct be_bus_address pa_base;
void *va_base;
void *ring_base;
struct async_pdu_handle *handle_base;
unsigned int host_write_ptr;
unsigned int ep_read_ptr;
unsigned int writables;
unsigned int free_entries;
unsigned int busy_entries;
unsigned int buffer_size;
unsigned int num_entries;
struct list_head free_list;
} async_header;
struct {
struct be_bus_address pa_base;
void *va_base;
void *ring_base;
struct async_pdu_handle *handle_base;
unsigned int host_write_ptr;
unsigned int ep_read_ptr;
unsigned int writables;
unsigned int free_entries;
unsigned int busy_entries;
unsigned int buffer_size;
struct list_head free_list;
unsigned int num_entries;
} async_data;
/**
* This is a varying size list! Do not add anything
* after this entry!!
*/
struct hwi_async_entry async_entry[BE_MIN_ASYNC_ENTRIES];
};
#define PDUCQE_CODE_MASK 0x0000003F
#define PDUCQE_DPL_MASK 0xFFFF0000
#define PDUCQE_INDEX_MASK 0x0000FFFF
struct i_t_dpdu_cqe {
u32 dw[4];
} __packed;
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
struct amap_i_t_dpdu_cqe {
u8 db_addr_hi[32];
u8 db_addr_lo[32];
u8 code[6];
u8 cid[10];
u8 dpl[16];
u8 index[16];
u8 num_cons[10];
u8 rsvd0[4];
u8 final;
u8 valid;
} __packed;
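/*
 * Illustrative decode using the PDUCQE_* masks above (equivalent to what
 * the amap helpers compute from the pseudo definition):
 *	code  = pdpdu_cqe->dw[2] & PDUCQE_CODE_MASK;
 *	dpl   = (pdpdu_cqe->dw[2] & PDUCQE_DPL_MASK) >> 16;
 *	index = pdpdu_cqe->dw[3] & PDUCQE_INDEX_MASK;
 */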
#define CQE_VALID_MASK 0x80000000
#define CQE_CODE_MASK 0x0000003F
#define CQE_CID_MASK 0x0000FFC0
#define EQE_VALID_MASK 0x00000001
#define EQE_MAJORCODE_MASK 0x0000000E
#define EQE_RESID_MASK 0xFFFF0000
struct be_eq_entry {
u32 dw[1];
} __packed;
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
struct amap_eq_entry {
u8 valid; /* DWORD 0 */
u8 major_code[3]; /* DWORD 0 */
u8 minor_code[12]; /* DWORD 0 */
u8 resource_id[16]; /* DWORD 0 */
} __packed;
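/*
 * Minimal sketch (hypothetical helper): the EQE_* masks above decode a raw
 * event queue entry; the driver proper goes through the amap helpers and
 * handles endian conversion.
 */
static inline bool beiscsi_eqe_is_valid(const struct be_eq_entry *eqe)
{
	return eqe->dw[0] & EQE_VALID_MASK;
}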
struct cq_db {
u32 dw[1];
} __packed;
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
struct amap_cq_db {
u8 qid[10];
u8 event[1];
u8 rsvd0[5];
u8 num_popped[13];
u8 rearm[1];
u8 rsvd1[2];
} __packed;
void beiscsi_process_eq(struct beiscsi_hba *phba);
struct iscsi_wrb {
u32 dw[16];
} __packed;
#define WRB_TYPE_MASK 0xF0000000
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
struct amap_iscsi_wrb {
u8 lun[14]; /* DWORD 0 */
u8 lt; /* DWORD 0 */
u8 invld; /* DWORD 0 */
u8 wrb_idx[8]; /* DWORD 0 */
u8 dsp; /* DWORD 0 */
u8 dmsg; /* DWORD 0 */
u8 undr_run; /* DWORD 0 */
u8 over_run; /* DWORD 0 */
u8 type[4]; /* DWORD 0 */
u8 ptr2nextwrb[8]; /* DWORD 1 */
u8 r2t_exp_dtl[24]; /* DWORD 1 */
u8 sgl_icd_idx[12]; /* DWORD 2 */
u8 rsvd0[20]; /* DWORD 2 */
u8 exp_data_sn[32]; /* DWORD 3 */
u8 iscsi_bhs_addr_hi[32]; /* DWORD 4 */
u8 iscsi_bhs_addr_lo[32]; /* DWORD 5 */
u8 cmdsn_itt[32]; /* DWORD 6 */
u8 dif_ref_tag[32]; /* DWORD 7 */
u8 sge0_addr_hi[32]; /* DWORD 8 */
u8 sge0_addr_lo[32]; /* DWORD 9 */
u8 sge0_offset[22]; /* DWORD 10 */
u8 pbs; /* DWORD 10 */
u8 dif_mode[2]; /* DWORD 10 */
u8 rsvd1[6]; /* DWORD 10 */
u8 sge0_last; /* DWORD 10 */
u8 sge0_len[17]; /* DWORD 11 */
u8 dif_meta_tag[14]; /* DWORD 11 */
u8 sge0_in_ddr; /* DWORD 11 */
u8 sge1_addr_hi[32]; /* DWORD 12 */
u8 sge1_addr_lo[32]; /* DWORD 13 */
u8 sge1_r2t_offset[22]; /* DWORD 14 */
u8 rsvd2[9]; /* DWORD 14 */
u8 sge1_last; /* DWORD 14 */
u8 sge1_len[17]; /* DWORD 15 */
u8 ref_sgl_icd_idx[12]; /* DWORD 15 */
u8 rsvd3[2]; /* DWORD 15 */
u8 sge1_in_ddr; /* DWORD 15 */
} __packed;
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
int index);
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
struct pdu_nop_out {
u32 dw[12];
};
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
struct amap_pdu_nop_out {
u8 opcode[6]; /* opcode 0x00 */
u8 i_bit; /* I Bit */
u8 x_bit; /* reserved; should be 0 */
u8 fp_bit_filler1[7];
u8 f_bit; /* always 1 */
u8 reserved1[16];
u8 ahs_length[8]; /* no AHS */
u8 data_len_hi[8];
u8 data_len_lo[16]; /* DataSegmentLength */
u8 lun[64];
u8 itt[32]; /* initiator id for ping or 0xffffffff */
u8 ttt[32]; /* target id for ping or 0xffffffff */
u8 cmd_sn[32];
u8 exp_stat_sn[32];
u8 reserved5[128];
};
#define PDUBASE_OPCODE_MASK 0x0000003F
#define PDUBASE_DATALENHI_MASK 0x0000FF00
#define PDUBASE_DATALENLO_MASK 0xFFFF0000
struct pdu_base {
u32 dw[16];
} __packed;
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
struct amap_pdu_base {
u8 opcode[6];
u8 i_bit; /* immediate bit */
u8 x_bit; /* reserved, always 0 */
u8 reserved1[24]; /* opcode-specific fields */
u8 ahs_length[8]; /* length units is 4 byte words */
u8 data_len_hi[8];
u8 data_len_lo[16]; /* DatasegmentLength */
u8 lun[64]; /* lun or opcode-specific fields */
u8 itt[32]; /* initiator task tag */
u8 reserved4[224];
};
struct iscsi_target_context_update_wrb {
u32 dw[16];
} __packed;
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
struct amap_iscsi_target_context_update_wrb {
u8 lun[14]; /* DWORD 0 */
u8 lt; /* DWORD 0 */
u8 invld; /* DWORD 0 */
u8 wrb_idx[8]; /* DWORD 0 */
u8 dsp; /* DWORD 0 */
u8 dmsg; /* DWORD 0 */
u8 undr_run; /* DWORD 0 */
u8 over_run; /* DWORD 0 */
u8 type[4]; /* DWORD 0 */
u8 ptr2nextwrb[8]; /* DWORD 1 */
u8 max_burst_length[19]; /* DWORD 1 */
u8 rsvd0[5]; /* DWORD 1 */
u8 rsvd1[15]; /* DWORD 2 */
u8 max_send_data_segment_length[17]; /* DWORD 2 */
u8 first_burst_length[14]; /* DWORD 3 */
u8 rsvd2[2]; /* DWORD 3 */
u8 tx_wrbindex_drv_msg[8]; /* DWORD 3 */
u8 rsvd3[5]; /* DWORD 3 */
u8 session_state[3]; /* DWORD 3 */
u8 rsvd4[16]; /* DWORD 4 */
u8 tx_jumbo; /* DWORD 4 */
u8 hde; /* DWORD 4 */
u8 dde; /* DWORD 4 */
u8 erl[2]; /* DWORD 4 */
u8 domain_id[5]; /* DWORD 4 */
u8 mode; /* DWORD 4 */
u8 imd; /* DWORD 4 */
u8 ir2t; /* DWORD 4 */
u8 notpredblq[2]; /* DWORD 4 */
u8 compltonack; /* DWORD 4 */
u8 stat_sn[32]; /* DWORD 5 */
u8 pad_buffer_addr_hi[32]; /* DWORD 6 */
u8 pad_buffer_addr_lo[32]; /* DWORD 7 */
u8 pad_addr_hi[32]; /* DWORD 8 */
u8 pad_addr_lo[32]; /* DWORD 9 */
u8 rsvd5[32]; /* DWORD 10 */
u8 rsvd6[32]; /* DWORD 11 */
u8 rsvd7[32]; /* DWORD 12 */
u8 rsvd8[32]; /* DWORD 13 */
u8 rsvd9[32]; /* DWORD 14 */
u8 rsvd10[32]; /* DWORD 15 */
} __packed;
struct be_ring {
u32 pages; /* queue size in pages */
u32 id; /* queue id assigned by beklib */
u32 num; /* number of elements in queue */
u32 cidx; /* consumer index */
u32 pidx; /* producer index -- not used by most rings */
u32 item_size; /* size in bytes of one object */
void *va; /* The virtual address of the ring. This
* should be last to allow 32 & 64 bit debugger
* extensions to work.
*/
};
struct hwi_wrb_context {
struct list_head wrb_handle_list;
struct list_head wrb_handle_drvr_list;
struct wrb_handle **pwrb_handle_base;
struct wrb_handle **pwrb_handle_basestd;
struct iscsi_wrb *plast_wrb;
unsigned short alloc_index;
unsigned short free_index;
unsigned short wrb_handles_available;
unsigned short cid;
};
struct hwi_controller {
struct list_head io_sgl_list;
struct list_head eh_sgl_list;
struct sgl_handle *psgl_handle_base;
unsigned int wrb_mem_index;
struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2];
struct mcc_wrb *pmcc_wrb_base;
struct be_ring default_pdu_hdr;
struct be_ring default_pdu_data;
struct hwi_context_memory *phwi_ctxt;
unsigned short cq_errors[CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN];
};
enum hwh_type_enum {
HWH_TYPE_IO = 1,
HWH_TYPE_LOGOUT = 2,
HWH_TYPE_TMF = 3,
HWH_TYPE_NOP = 4,
HWH_TYPE_IO_RD = 5,
HWH_TYPE_LOGIN = 11,
HWH_TYPE_INVALID = 0xFFFFFFFF
};
struct wrb_handle {
enum hwh_type_enum type;
unsigned short wrb_index;
unsigned short nxt_wrb_index;
struct iscsi_task *pio_handle;
struct iscsi_wrb *pwrb;
};
struct hwi_context_memory {
struct be_eq_obj be_eq;
struct be_queue_info be_cq;
struct be_queue_info be_mcc_cq;
struct be_queue_info be_mcc;
struct be_queue_info be_def_hdrq;
struct be_queue_info be_def_dataq;
struct be_queue_info be_wrbq[BE2_MAX_SESSIONS];
struct be_mcc_wrb_context *pbe_mcc_context;
struct hwi_async_pdu_context *pasync_ctx;
};
#endif
/**
* Copyright (C) 2005 - 2009 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*
*/
#include "be_mgmt.h"
#include "be_iscsi.h"
unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_fw_cfg *req = embedded_payload(wrb);
int status = 0;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
status = be_mbox_notify(ctrl);
if (!status) {
struct be_fw_cfg *pfw_cfg;
pfw_cfg = req;
phba->fw_config.phys_port = pfw_cfg->phys_port;
phba->fw_config.iscsi_icd_start =
pfw_cfg->ulp[0].icd_base;
phba->fw_config.iscsi_icd_count =
pfw_cfg->ulp[0].icd_count;
phba->fw_config.iscsi_cid_start =
pfw_cfg->ulp[0].sq_base;
phba->fw_config.iscsi_cid_count =
pfw_cfg->ulp[0].sq_count;
} else {
shost_printk(KERN_WARNING, phba->shost,
"Failed in mgmt_get_fw_config \n");
}
spin_unlock(&ctrl->mbox_lock);
return status;
}
unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl)
{
struct be_dma_mem nonemb_cmd;
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_mgmt_controller_attributes *req;
struct be_sge *sge = nonembedded_sgl(wrb);
int status = 0;
nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
sizeof(struct be_mgmt_controller_attributes),
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
SE_DEBUG(DBG_LVL_1,
"Failed to allocate memory for mgmt_check_supported_fw"
"\n");
return -1;
}
nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
req = nonemb_cmd.va;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd.size);
status = be_mbox_notify(ctrl);
if (!status) {
struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
SE_DEBUG(DBG_LVL_8, "Firmware version of CMD: %s\n",
resp->params.hba_attribs.flashrom_version_string);
SE_DEBUG(DBG_LVL_8, "Firmware version is : %s\n",
resp->params.hba_attribs.firmware_version_string);
SE_DEBUG(DBG_LVL_8,
"Developer Build, not performing version check...\n");
} else
SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n");
if (nonemb_cmd.va)
pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
spin_unlock(&ctrl->mbox_lock);
return status;
}
unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct iscsi_cleanup_req *req = embedded_payload(wrb);
int status = 0;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
req->chute = chute;
req->hdr_ring_id = 0;
req->data_ring_id = 0;
status = be_mbox_notify(ctrl);
if (status)
shost_printk(KERN_WARNING, phba->shost,
" mgmt_epfw_cleanup , FAILED\n");
spin_unlock(&ctrl->mbox_lock);
return status;
}
unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
unsigned int icd, unsigned int cid)
{
struct be_dma_mem nonemb_cmd;
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_sge *sge = nonembedded_sgl(wrb);
struct invalidate_commands_params_in *req;
int status = 0;
nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
sizeof(struct invalidate_commands_params_in),
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
SE_DEBUG(DBG_LVL_1,
"Failed to allocate memory for"
"mgmt_invalidate_icds \n");
return -1;
}
nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
req = nonemb_cmd.va;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS,
sizeof(*req));
req->ref_handle = 0;
req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
req->icd_count = 0;
req->table[req->icd_count].icd = icd;
req->table[req->icd_count].cid = cid;
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd.size);
status = be_mbox_notify(ctrl);
if (status)
SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n");
spin_unlock(&ctrl->mbox_lock);
if (nonemb_cmd.va)
pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return status;
}
unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
struct beiscsi_endpoint *beiscsi_ep,
unsigned short cid,
unsigned short issue_reset,
unsigned short savecfg_flag)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct iscsi_invalidate_connection_params_in *req =
embedded_payload(wrb);
int status = 0;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION,
sizeof(*req));
req->session_handle = beiscsi_ep->fw_handle;
req->cid = cid;
if (issue_reset)
req->cleanup_type = CMD_ISCSI_CONNECTION_ISSUE_TCP_RST;
else
req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
req->save_cfg = savecfg_flag;
status = be_mbox_notify(ctrl);
if (status)
SE_DEBUG(DBG_LVL_1, "Invalidation Failed\n");
spin_unlock(&ctrl->mbox_lock);
return status;
}
unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
unsigned short cid, unsigned int upload_flag)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct tcp_upload_params_in *req = embedded_payload(wrb);
int status = 0;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
req->id = (unsigned short)cid;
req->upload_type = (unsigned char)upload_flag;
status = be_mbox_notify(ctrl);
if (status)
SE_DEBUG(DBG_LVL_1, "mgmt_upload_connection Failed\n");
spin_unlock(&ctrl->mbox_lock);
return status;
}
int mgmt_open_connection(struct beiscsi_hba *phba,
struct sockaddr *dst_addr,
struct beiscsi_endpoint *beiscsi_ep)
{
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr;
struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct tcp_connect_and_offload_in *req = embedded_payload(wrb);
unsigned short def_hdr_id;
unsigned short def_data_id;
struct phys_addr template_address = { 0, 0 };
struct phys_addr *ptemplate_address;
int status = 0;
unsigned short cid = beiscsi_ep->ep_cid;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba);
def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba);
ptemplate_address = &template_address;
ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
sizeof(*req));
if (dst_addr->sa_family == PF_INET) {
__be32 s_addr = daddr_in->sin_addr.s_addr;
req->ip_address.ip_type = BE2_IPV4;
req->ip_address.ip_address[0] = s_addr & 0x000000ff;
req->ip_address.ip_address[1] = (s_addr & 0x0000ff00) >> 8;
req->ip_address.ip_address[2] = (s_addr & 0x00ff0000) >> 16;
req->ip_address.ip_address[3] = (s_addr & 0xff000000) >> 24;
req->tcp_port = ntohs(daddr_in->sin_port);
beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
beiscsi_ep->ip_type = BE2_IPV4;
} else if (dst_addr->sa_family == PF_INET6) {
req->ip_address.ip_type = BE2_IPV6;
memcpy(&req->ip_address.ip_address,
&daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
req->tcp_port = ntohs(daddr_in6->sin6_port);
beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port);
memcpy(&beiscsi_ep->dst6_addr,
&daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
beiscsi_ep->ip_type = BE2_IPV6;
} else {
shost_printk(KERN_ERR, phba->shost, "unknown addr family %d\n",
dst_addr->sa_family);
spin_unlock(&ctrl->mbox_lock);
return -EINVAL;
}
req->cid = cid;
req->cq_id = phwi_context->be_cq.id;
req->defq_id = def_hdr_id;
req->hdr_ring_id = def_hdr_id;
req->data_ring_id = def_data_id;
req->do_offload = 1;
req->dataout_template_pa.lo = ptemplate_address->lo;
req->dataout_template_pa.hi = ptemplate_address->hi;
status = be_mbox_notify(ctrl);
if (!status) {
struct iscsi_endpoint *ep;
struct tcp_connect_and_offload_out *ptcpcnct_out =
embedded_payload(wrb);
ep = phba->ep_array[ptcpcnct_out->cid];
beiscsi_ep = ep->dd_data;
beiscsi_ep->fw_handle = 0;
beiscsi_ep->cid_vld = 1;
SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
} else
SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed\n");
spin_unlock(&ctrl->mbox_lock);
return status;
}
/**
* Copyright (C) 2005 - 2009 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*
*/
#ifndef _BEISCSI_MGMT_
#define _BEISCSI_MGMT_
#include <linux/types.h>
#include <linux/list.h>
#include "be_iscsi.h"
#include "be_main.h"
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
struct amap_mcc_sge {
u8 pa_lo[32]; /* dword 0 */
u8 pa_hi[32]; /* dword 1 */
u8 length[32]; /* DWORD 2 */
} __packed;
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
struct amap_mcc_wrb_payload {
union {
struct amap_mcc_sge sgl[19];
u8 embedded[59 * 32]; /* DWORDS 57 to 115 */
} u;
} __packed;
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
struct amap_mcc_wrb {
u8 embedded; /* DWORD 0 */
u8 rsvd0[2]; /* DWORD 0 */
u8 sge_count[5]; /* DWORD 0 */
u8 rsvd1[16]; /* DWORD 0 */
u8 special[8]; /* DWORD 0 */
u8 payload_length[32];
u8 tag[64]; /* DWORD 2 */
u8 rsvd2[32]; /* DWORD 4 */
struct amap_mcc_wrb_payload payload;
};
struct mcc_sge {
u32 pa_lo; /* dword 0 */
u32 pa_hi; /* dword 1 */
u32 length; /* DWORD 2 */
} __packed;
struct mcc_wrb_payload {
union {
struct mcc_sge sgl[19];
u32 embedded[59]; /* DWORDS 57 to 115 */
} u;
} __packed;
#define MCC_WRB_EMBEDDED_MASK 0x00000001
struct mcc_wrb {
u32 dw[0]; /* DWORD 0 */
u32 payload_length;
u32 tag[2]; /* DWORD 2 */
u32 rsvd2[1]; /* DWORD 4 */
struct mcc_wrb_payload payload;
};
unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute);
int mgmt_open_connection(struct beiscsi_hba *phba, struct sockaddr *dst_addr,
struct beiscsi_endpoint *beiscsi_ep);
unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
unsigned short cid,
unsigned int upload_flag);
unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
unsigned int icd, unsigned int cid);
struct iscsi_invalidate_connection_params_in {
struct be_cmd_req_hdr hdr;
unsigned int session_handle;
unsigned short cid;
unsigned short unused;
unsigned short cleanup_type;
unsigned short save_cfg;
} __packed;
struct iscsi_invalidate_connection_params_out {
unsigned int session_handle;
unsigned short cid;
unsigned short unused;
} __packed;
union iscsi_invalidate_connection_params {
struct iscsi_invalidate_connection_params_in request;
struct iscsi_invalidate_connection_params_out response;
} __packed;
struct invalidate_command_table {
unsigned short icd;
unsigned short cid;
} __packed;
struct invalidate_commands_params_in {
struct be_cmd_req_hdr hdr;
unsigned int ref_handle;
unsigned int icd_count;
struct invalidate_command_table table[128];
unsigned short cleanup_type;
unsigned short unused;
} __packed;
struct invalidate_commands_params_out {
unsigned int ref_handle;
unsigned int icd_count;
unsigned int icd_status[128];
} __packed;
union invalidate_commands_params {
struct invalidate_commands_params_in request;
struct invalidate_commands_params_out response;
} __packed;
struct mgmt_hba_attributes {
u8 flashrom_version_string[32];
u8 manufacturer_name[32];
u32 supported_modes;
u8 seeprom_version_lo;
u8 seeprom_version_hi;
u8 rsvd0[2];
u32 fw_cmd_data_struct_version;
u32 ep_fw_data_struct_version;
u32 future_reserved[12];
u32 default_extended_timeout;
u8 controller_model_number[32];
u8 controller_description[64];
u8 controller_serial_number[32];
u8 ip_version_string[32];
u8 firmware_version_string[32];
u8 bios_version_string[32];
u8 redboot_version_string[32];
u8 driver_version_string[32];
u8 fw_on_flash_version_string[32];
u32 functionalities_supported;
u16 max_cdblength;
u8 asic_revision;
u8 generational_guid[16];
u8 hba_port_count;
u16 default_link_down_timeout;
u8 iscsi_ver_min_max;
u8 multifunction_device;
u8 cache_valid;
u8 hba_status;
u8 max_domains_supported;
u8 phy_port;
u32 firmware_post_status;
u32 hba_mtu[8];
u32 future_u32[4];
} __packed;
struct mgmt_controller_attributes {
struct mgmt_hba_attributes hba_attribs;
u16 pci_vendor_id;
u16 pci_device_id;
u16 pci_sub_vendor_id;
u16 pci_sub_system_id;
u8 pci_bus_number;
u8 pci_device_number;
u8 pci_function_number;
u8 interface_type;
u64 unique_identifier;
u8 netfilters;
u8 rsvd0[3];
u8 future_u32[4];
} __packed;
struct be_mgmt_controller_attributes {
struct be_cmd_req_hdr hdr;
struct mgmt_controller_attributes params;
} __packed;
struct be_mgmt_controller_attributes_resp {
struct be_cmd_resp_hdr hdr;
struct mgmt_controller_attributes params;
} __packed;
/* configuration management */
#define GET_MGMT_CONTROLLER_WS(phba) (phba->pmgmt_ws)
/* MGMT CMD flags */
#define MGMT_CMDH_FREE (1<<0)
/* --- MGMT_ERROR_CODES --- */
/* Error Codes returned in the status field of the CMD response header */
#define MGMT_STATUS_SUCCESS 0 /* The CMD completed without errors */
#define MGMT_STATUS_FAILED 1 /* Error status in the Status field of */
/* the CMD_RESPONSE_HEADER */
#define ISCSI_GET_PDU_TEMPLATE_ADDRESS(pc, pa) {\
(pa)->lo = (pc)->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
bus_address.u.a32.address_lo; \
(pa)->hi = (pc)->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
bus_address.u.a32.address_hi; \
}
struct beiscsi_endpoint {
struct beiscsi_hba *phba;
struct beiscsi_sess *sess;
struct beiscsi_conn *conn;
unsigned short ip_type;
char dst6_addr[ISCSI_ADDRESS_BUF_LEN];
unsigned long dst_addr;
unsigned short ep_cid;
unsigned int fw_handle;
u16 dst_tcpport;
u16 cid_vld;
};
unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba);
unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
struct beiscsi_endpoint *beiscsi_ep,
unsigned short cid,
unsigned short issue_reset,
unsigned short savecfg_flag);
#endif
......@@ -377,6 +377,24 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
return ptr + sizeof(struct qla2xxx_mq_chain);
}
static void
qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
{
struct qla_hw_data *ha = vha->hw;
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Failed to dump firmware (%x)!!!\n", rval);
ha->fw_dumped = 0;
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
}
/**
* qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
* @ha: HA context
......@@ -530,17 +548,7 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
if (rval == QLA_SUCCESS)
qla2xxx_copy_queues(ha, nxt);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Failed to dump firmware (%x)!!!\n", rval);
ha->fw_dumped = 0;
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
base_vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
}
qla2xxx_dump_post_process(base_vha, rval);
qla2300_fw_dump_failed:
if (!hardware_locked)
......@@ -737,17 +745,7 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
if (rval == QLA_SUCCESS)
qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Failed to dump firmware (%x)!!!\n", rval);
ha->fw_dumped = 0;
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
base_vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
}
qla2xxx_dump_post_process(base_vha, rval);
qla2100_fw_dump_failed:
if (!hardware_locked)
......@@ -984,17 +982,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_copy_eft(ha, nxt);
qla24xx_fw_dump_failed_0:
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Failed to dump firmware (%x)!!!\n", rval);
ha->fw_dumped = 0;
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
base_vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
}
qla2xxx_dump_post_process(base_vha, rval);
qla24xx_fw_dump_failed:
if (!hardware_locked)
......@@ -1305,17 +1293,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
}
qla25xx_fw_dump_failed_0:
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Failed to dump firmware (%x)!!!\n", rval);
ha->fw_dumped = 0;
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
base_vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
}
qla2xxx_dump_post_process(base_vha, rval);
qla25xx_fw_dump_failed:
if (!hardware_locked)
......@@ -1628,17 +1606,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
}
qla81xx_fw_dump_failed_0:
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Failed to dump firmware (%x)!!!\n", rval);
ha->fw_dumped = 0;
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
base_vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
}
qla2xxx_dump_post_process(base_vha, rval);
qla81xx_fw_dump_failed:
if (!hardware_locked)
......
......@@ -2123,6 +2123,7 @@ enum qla_work_type {
QLA_EVT_ASYNC_LOGIN_DONE,
QLA_EVT_ASYNC_LOGOUT,
QLA_EVT_ASYNC_LOGOUT_DONE,
QLA_EVT_UEVENT,
};
......@@ -2146,6 +2147,10 @@ struct qla_work_evt {
#define QLA_LOGIO_LOGIN_RETRIED BIT_0
u16 data[2];
} logio;
struct {
u32 code;
#define QLA_UEVENT_CODE_FW_DUMP 0
} uevent;
} u;
};
......
......@@ -92,6 +92,7 @@ extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
......
......@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
......@@ -2653,6 +2654,38 @@ qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
int
qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
{
struct qla_work_evt *e;
e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
if (!e)
return QLA_FUNCTION_FAILED;
e->u.uevent.code = code;
return qla2x00_post_work(vha, e);
}
static void
qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
{
char event_string[40];
char *envp[] = { event_string, NULL };
switch (code) {
case QLA_UEVENT_CODE_FW_DUMP:
snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
vha->host_no);
break;
default:
/* do nothing */
break;
}
kobject_uevent_env(&(&vha->hw->pdev->driver->driver)->owner->mkobj.kobj,
KOBJ_CHANGE, envp);
}
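/*
 * Note on the FW_DUMP case above: the KOBJ_CHANGE uevent carries a single
 * extra environment variable of the form "FW_DUMP=<host_no>", so userspace
 * (for example a udev rule) can react when a firmware dump has been saved.
 */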
void
qla2x00_do_work(struct scsi_qla_host *vha)
{
......@@ -2690,6 +2723,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla2x00_async_logout_done(vha, e->u.logio.fcport,
e->u.logio.data);
break;
case QLA_EVT_UEVENT:
qla2x00_uevent_emit(vha, e->u.uevent.code);
break;
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
......
......@@ -37,6 +37,7 @@
#include <scsi/scsi_host.h>
#include <linux/acpi.h>
#include <linux/cdrom.h>
#include <linux/blk-iopoll.h>
/*
* Define if arch has non-standard setup. This is a _PCI_ standard
......@@ -763,6 +764,7 @@ struct ata_port {
#endif
/* owned by EH */
u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
struct blk_iopoll iopoll;
};
/* The following initializer overrides a method to NULL whether one of
......
......@@ -2,3 +2,4 @@ header-y += scsi.h
header-y += scsi_netlink.h
header-y += scsi_netlink_fc.h
header-y += scsi_bsg_fc.h
header-y += fc/
header-y += fc_els.h
header-y += fc_fs.h
header-y += fc_gs.h
header-y += fc_ns.h