Commit 1420b5d7 authored by Vipin Bhandari's avatar Vipin Bhandari Committed by Kevin Hilman

ARM: DaVinci: Cleaning of DaVinci MMC driver

The scatterlist traversal is now handled properly. The patch also includes
some minor comment updates, sets the value written to the DAVINCI_MMCTOR
register correctly, and fixes the timeout calculation by deriving it from
the actual clock values.

This patch incorporates most of the review comments given on LKML by
Pierre Ossman.
Signed-off-by: Vipin Bhandari <vipin.bhandari@ti.com>
Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
parent a4833cd8
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/mmc/mmc.h>
#include <mach/mmc.h> #include <mach/mmc.h>
#include <mach/cpu.h> #include <mach/cpu.h>
...@@ -145,7 +146,9 @@ ...@@ -145,7 +146,9 @@
#define NR_SG 16 #define NR_SG 16
#define DAVINCI_MMC_FIFO_SIZE_BYTE 32
static unsigned rw_threshold = 32; static unsigned rw_threshold = 32;
static unsigned ns_in_one_cycle = 1;
module_param(rw_threshold, uint, S_IRUGO); module_param(rw_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(rw_threshold, MODULE_PARM_DESC(rw_threshold,
"Read/Write threshold. Default = 32"); "Read/Write threshold. Default = 32");
...@@ -194,7 +197,6 @@ struct mmc_davinci_host { ...@@ -194,7 +197,6 @@ struct mmc_davinci_host {
/* For PIO we walk scatterlists one segment at a time. */ /* For PIO we walk scatterlists one segment at a time. */
unsigned int sg_len; unsigned int sg_len;
int sg_idx;
/* Version of the MMC/SD controller */ /* Version of the MMC/SD controller */
u8 version; u8 version;
...@@ -205,8 +207,7 @@ struct mmc_davinci_host { ...@@ -205,8 +207,7 @@ struct mmc_davinci_host {
static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
{ {
struct scatterlist *sg; struct scatterlist *sg;
sg = host->data->sg;
sg = host->data->sg + host->sg_idx;
host->buffer_bytes_left = sg_dma_len(sg); host->buffer_bytes_left = sg_dma_len(sg);
host->buffer = sg_virt(sg); host->buffer = sg_virt(sg);
if (host->buffer_bytes_left > host->bytes_left) if (host->buffer_bytes_left > host->bytes_left)
...@@ -220,8 +221,7 @@ static void davinci_fifo_data_trans(struct mmc_davinci_host *host, ...@@ -220,8 +221,7 @@ static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
unsigned int i; unsigned int i;
if (host->buffer_bytes_left == 0) { if (host->buffer_bytes_left == 0) {
host->sg_idx++; host->data->sg = sg_next(host->data->sg);
BUG_ON(host->sg_idx == host->sg_len);
mmc_davinci_sg_to_buf(host); mmc_davinci_sg_to_buf(host);
} }
...@@ -312,10 +312,6 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host, ...@@ -312,10 +312,6 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host,
/* Set command index */ /* Set command index */
cmd_reg |= cmd->opcode; cmd_reg |= cmd->opcode;
/* Setting initialize clock */
if (cmd->opcode == 0)
cmd_reg |= MMCCMD_INITCK;
/* Enable EDMA transfer triggers */ /* Enable EDMA transfer triggers */
if (host->do_dma) if (host->do_dma)
cmd_reg |= MMCCMD_DMATRIG; cmd_reg |= MMCCMD_DMATRIG;
...@@ -340,7 +336,7 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host, ...@@ -340,7 +336,7 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host,
cmd_reg |= MMCCMD_PPLEN; cmd_reg |= MMCCMD_PPLEN;
/* set Command timeout */ /* set Command timeout */
writel(0xFFFF, host->base + DAVINCI_MMCTOR); writel(0x1FFF, host->base + DAVINCI_MMCTOR);
/* Enable interrupt (calculate here, defer until FIFO is stuffed). */ /* Enable interrupt (calculate here, defer until FIFO is stuffed). */
im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS; im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
...@@ -358,10 +354,12 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host, ...@@ -358,10 +354,12 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host,
/* /*
* Before non-DMA WRITE commands the controller needs priming: * Before non-DMA WRITE commands the controller needs priming:
* FIFO should be populated with 32 bytes * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size
*/ */
if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)) if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) &&
davinci_fifo_data_trans(host, 32); ((cmd->opcode == MMC_WRITE_BLOCK) ||
(cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)))
davinci_fifo_data_trans(host, DAVINCI_MMC_FIFO_SIZE_BYTE);
writel(cmd->arg, host->base + DAVINCI_MMCARGHL); writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
writel(cmd_reg, host->base + DAVINCI_MMCCMD); writel(cmd_reg, host->base + DAVINCI_MMCCMD);
...@@ -489,13 +487,14 @@ static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host, ...@@ -489,13 +487,14 @@ static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
} }
/* We know sg_len and ccnt will never be out of range because /* We know sg_len and ccnt will never be out of range because
* we told the block layer to ensure that it only hands us one * we told the mmc layer which in turn tells the block layer
* scatterlist segment per EDMA PARAM entry. Update the PARAM * to ensure that it only hands us one scatterlist segment
* per EDMA PARAM entry. Update the PARAM
* entries needed for each segment of this scatterlist. * entries needed for each segment of this scatterlist.
*/ */
for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len; for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len;
sg_len-- != 0 && bytes_left; sg_len-- != 0 && bytes_left;
sg++, slot = host->links[link++]) { sg = sg_next(sg), slot = host->links[link++]) {
u32 buf = sg_dma_address(sg); u32 buf = sg_dma_address(sg);
unsigned count = sg_dma_len(sg); unsigned count = sg_dma_len(sg);
...@@ -638,11 +637,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) ...@@ -638,11 +637,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
data->blocks, data->blksz); data->blocks, data->blksz);
dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n",
data->timeout_clks, data->timeout_ns); data->timeout_clks, data->timeout_ns);
timeout = data->timeout_clks + data->timeout_ns / ns_in_one_cycle;
/* Convert ns to clock cycles by assuming 20MHz frequency
* 1 cycle at 20MHz = 500 ns
*/
timeout = data->timeout_clks + data->timeout_ns / 500;
if (timeout > 0xffff) if (timeout > 0xffff)
timeout = 0xffff; timeout = 0xffff;
...@@ -686,7 +681,6 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) ...@@ -686,7 +681,6 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
host->bytes_left = 0; host->bytes_left = 0;
} else { } else {
/* Revert to CPU Copy */ /* Revert to CPU Copy */
host->sg_idx = 0;
host->sg_len = data->sg_len; host->sg_len = data->sg_len;
mmc_davinci_sg_to_buf(host); mmc_davinci_sg_to_buf(host);
} }
...@@ -736,6 +730,14 @@ static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, ...@@ -736,6 +730,14 @@ static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
if (mmc_freq > mmc_req_freq) if (mmc_freq > mmc_req_freq)
mmc_push_pull = mmc_push_pull + 1; mmc_push_pull = mmc_push_pull + 1;
/* Convert ns to clock cycles */
if (mmc_req_freq < 400000)
ns_in_one_cycle = 1000000 / ((cpu_arm_clk
/ (2 * (mmc_push_pull + 1)))/1000);
else
ns_in_one_cycle = 1000000 / ((cpu_arm_clk
/ (2 * (mmc_push_pull + 1)))/1000000);
return mmc_push_pull; return mmc_push_pull;
} }
...@@ -762,11 +764,22 @@ static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ...@@ -762,11 +764,22 @@ static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
u32 temp; u32 temp;
/* Ignoring the init clock value passed for fixing the inter
* operability with different cards.
*/
open_drain_freq = ((unsigned int)cpu_arm_clk open_drain_freq = ((unsigned int)cpu_arm_clk
/ (2 * MMCSD_INIT_CLOCK)) - 1; / (2 * MMCSD_INIT_CLOCK)) - 1;
temp = readl(host->base + DAVINCI_MMCCLK) & ~0xFF;
if (open_drain_freq > 0xFF)
open_drain_freq = 0xFF;
temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
temp |= open_drain_freq; temp |= open_drain_freq;
writel(temp, host->base + DAVINCI_MMCCLK); writel(temp, host->base + DAVINCI_MMCCLK);
/* Convert ns to clock cycles */
ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
} else { } else {
u32 temp; u32 temp;
mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);
...@@ -863,10 +876,6 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) ...@@ -863,10 +876,6 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{ {
u32 temp; u32 temp;
/* record how much data we transferred */
temp = readl(host->base + DAVINCI_MMCNBLC);
data->bytes_xfered += (data->blocks - temp) * data->blksz;
/* reset command and data state machines */ /* reset command and data state machines */
temp = readl(host->base + DAVINCI_MMCCTL); temp = readl(host->base + DAVINCI_MMCCTL);
writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST, writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST,
...@@ -922,7 +931,7 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) ...@@ -922,7 +931,7 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
davinci_fifo_data_trans(host, host->bytes_left); davinci_fifo_data_trans(host, host->bytes_left);
} }
end_transfer = 1; end_transfer = 1;
data->bytes_xfered += data->blocks * data->blksz; data->bytes_xfered = data->blocks * data->blksz;
} else { } else {
dev_err(mmc_dev(host->mmc), dev_err(mmc_dev(host->mmc),
"DATDNE with no host->data\n"); "DATDNE with no host->data\n");
...@@ -984,9 +993,7 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) ...@@ -984,9 +993,7 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
/* Command CRC error */ /* Command CRC error */
dev_dbg(mmc_dev(host->mmc), "Command CRC error\n"); dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
if (host->cmd) { if (host->cmd) {
/* Ignore CMD CRC errors during high speed operation */ host->cmd->error = -EILSEQ;
if (host->mmc->ios.clock <= 25000000)
host->cmd->error = -EILSEQ;
end_command = 1; end_command = 1;
} }
} }
...@@ -1047,7 +1054,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host) ...@@ -1047,7 +1054,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host)
writel(0, host->base + DAVINCI_MMCCLK); writel(0, host->base + DAVINCI_MMCCLK);
writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
writel(0xFFFF, host->base + DAVINCI_MMCTOR); writel(0x1FFF, host->base + DAVINCI_MMCTOR);
writel(0xFFFF, host->base + DAVINCI_MMCTOD); writel(0xFFFF, host->base + DAVINCI_MMCTOD);
writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST, writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST,
...@@ -1149,7 +1156,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev) ...@@ -1149,7 +1156,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
mmc->max_seg_size = MAX_CCNT * rw_threshold; mmc->max_seg_size = MAX_CCNT * rw_threshold;
/* MMC/SD controller limits for multiblock requests */ /* MMC/SD controller limits for multiblock requests */
mmc->max_blk_size = 4095; /* BLEN is 11 bits */ mmc->max_blk_size = 4095; /* BLEN is 12 bits */
mmc->max_blk_count = 65535; /* NBLK is 16 bits */ mmc->max_blk_count = 65535; /* NBLK is 16 bits */
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment