author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/mmc
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/Kconfig      |   63
-rw-r--r--  drivers/mmc/Makefile     |   22
-rw-r--r--  drivers/mmc/mmc.c        |  914
-rw-r--r--  drivers/mmc/mmc.h        |   16
-rw-r--r--  drivers/mmc/mmc_block.c  |  509
-rw-r--r--  drivers/mmc/mmc_queue.c  |  238
-rw-r--r--  drivers/mmc/mmc_queue.h  |   33
-rw-r--r--  drivers/mmc/mmc_sysfs.c  |  238
-rw-r--r--  drivers/mmc/mmci.c       |  680
-rw-r--r--  drivers/mmc/mmci.h       |  179
-rw-r--r--  drivers/mmc/pxamci.c     |  610
-rw-r--r--  drivers/mmc/pxamci.h     |  124
-rw-r--r--  drivers/mmc/wbsd.c       | 1651
-rw-r--r--  drivers/mmc/wbsd.h       |  178
14 files changed, 5455 insertions(+), 0 deletions(-)
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
new file mode 100644
index 00000000000..72f2b466b81
--- /dev/null
+++ b/drivers/mmc/Kconfig
@@ -0,0 +1,63 @@
+#
+# MMC subsystem configuration
+#
+
+menu "MMC/SD Card support"
+
+config MMC
+ tristate "MMC support"
+ help
+ MMC is the "multi-media card" bus protocol.
+
+ If you want MMC support, you should say Y here and also
+ to the specific driver for your MMC interface.
+
+config MMC_DEBUG
+ bool "MMC debugging"
+ depends on MMC != n
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables MMC core and driver debugging.
+
+config MMC_BLOCK
+ tristate "MMC block device driver"
+ depends on MMC
+ default y
+ help
+ Say Y here to enable the MMC block device driver support.
+ This provides a block device driver, which you can use to
+ mount the filesystem. Almost everyone wishing MMC support
+ should say Y or M here.
+
+config MMC_ARMMMCI
+ tristate "ARM AMBA Multimedia Card Interface support"
+ depends on ARM_AMBA && MMC
+ help
+ This selects the ARM(R) AMBA(R) PrimeCell Multimedia Card
+ Interface (PL180 and PL181) support. If you have an ARM(R)
+ platform with a Multimedia Card slot, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_PXA
+ tristate "Intel PXA255 Multimedia Card Interface support"
+ depends on ARCH_PXA && MMC
+ help
+ This selects the Intel(R) PXA(R) Multimedia card Interface.
+ If you have a PXA(R) platform with a Multimedia Card slot,
+ say Y or M here.
+
+ If unsure, say N.
+
+config MMC_WBSD
+ tristate "Winbond W83L51xD SD/MMC Card Interface support"
+ depends on MMC && ISA
+ help
+ This selects the Winbond(R) W83L51xD Secure digital and
+ Multimedia card Interface.
+ If you have a machine with an integrated W83L518D or W83L519D
+ SD/MMC card reader, say Y or M here.
+
+ If unsure, say N.
+
+endmenu
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
new file mode 100644
index 00000000000..89510c2086c
--- /dev/null
+++ b/drivers/mmc/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for the kernel mmc device drivers.
+#
+
+#
+# Core
+#
+obj-$(CONFIG_MMC) += mmc_core.o
+
+#
+# Media drivers
+#
+obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
+
+#
+# Host drivers
+#
+obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
+obj-$(CONFIG_MMC_PXA) += pxamci.o
+obj-$(CONFIG_MMC_WBSD) += wbsd.o
+
+mmc_core-y := mmc.o mmc_queue.o mmc_sysfs.o
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
new file mode 100644
index 00000000000..e4002632825
--- /dev/null
+++ b/drivers/mmc/mmc.c
@@ -0,0 +1,914 @@
+/*
+ * linux/drivers/mmc/mmc.c
+ *
+ * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/protocol.h>
+
+#include "mmc.h"
+
+#ifdef CONFIG_MMC_DEBUG
+#define DBG(x...) printk(KERN_DEBUG x)
+#else
+#define DBG(x...) do { } while (0)
+#endif
+
+#define CMD_RETRIES 3
+
+/*
+ * OCR Bit positions to 10s of Vdd mV.
+ */
+static const unsigned short mmc_ocr_bit_to_vdd[] = {
+ 150, 155, 160, 165, 170, 180, 190, 200,
+ 210, 220, 230, 240, 250, 260, 270, 280,
+ 290, 300, 310, 320, 330, 340, 350, 360
+};
+
+static const unsigned int tran_exp[] = {
+ 10000, 100000, 1000000, 10000000,
+ 0, 0, 0, 0
+};
+
+static const unsigned char tran_mant[] = {
+ 0, 10, 12, 13, 15, 20, 25, 30,
+ 35, 40, 45, 50, 55, 60, 70, 80,
+};
+
+static const unsigned int tacc_exp[] = {
+ 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
+};
+
+static const unsigned int tacc_mant[] = {
+ 0, 10, 12, 13, 15, 20, 25, 30,
+ 35, 40, 45, 50, 55, 60, 70, 80,
+};
+
+
+/**
+ * mmc_request_done - finish processing an MMC command
+ * @host: MMC host which completed command
+ * @mrq: MMC request which completed
+ *
+ * MMC drivers should call this function when they have completed
+ * their processing of a command. This should be called before the
+ * data part of the command has completed.
+ */
+void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ struct mmc_command *cmd = mrq->cmd;
+ int err = mrq->cmd->error;
+ DBG("MMC: req done (%02x): %d: %08x %08x %08x %08x\n", cmd->opcode,
+ err, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
+
+ if (err && cmd->retries) {
+ cmd->retries--;
+ cmd->error = 0;
+ host->ops->request(host, mrq);
+ } else if (mrq->done) {
+ mrq->done(mrq);
+ }
+}
+
+EXPORT_SYMBOL(mmc_request_done);
+
+/**
+ * mmc_start_request - start a command on a host
+ * @host: MMC host to start command on
+ * @mrq: MMC request to start
+ *
+ * Queue a command on the specified host. We expect the
+ * caller to be holding the host lock with interrupts disabled.
+ */
+void
+mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+{
+ DBG("MMC: starting cmd %02x arg %08x flags %08x\n",
+ mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+
+ WARN_ON(host->card_busy == NULL);
+
+ mrq->cmd->error = 0;
+ mrq->cmd->mrq = mrq;
+ if (mrq->data) {
+ mrq->cmd->data = mrq->data;
+ mrq->data->error = 0;
+ mrq->data->mrq = mrq;
+ if (mrq->stop) {
+ mrq->data->stop = mrq->stop;
+ mrq->stop->error = 0;
+ mrq->stop->mrq = mrq;
+ }
+ }
+ host->ops->request(host, mrq);
+}
+
+EXPORT_SYMBOL(mmc_start_request);
+
+static void mmc_wait_done(struct mmc_request *mrq)
+{
+ complete(mrq->done_data);
+}
+
+int mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ DECLARE_COMPLETION(complete);
+
+ mrq->done_data = &complete;
+ mrq->done = mmc_wait_done;
+
+ mmc_start_request(host, mrq);
+
+ wait_for_completion(&complete);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(mmc_wait_for_req);
+
+/**
+ * mmc_wait_for_cmd - start a command and wait for completion
+ * @host: MMC host to start command
+ * @cmd: MMC command to start
+ * @retries: maximum number of retries
+ *
+ * Start a new MMC command for a host, and wait for the command
+ * to complete. Return any error that occurred while the command
+ * was executing. Do not attempt to parse the response.
+ */
+int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
+{
+ struct mmc_request mrq;
+
+ BUG_ON(host->card_busy == NULL);
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+
+ memset(cmd->resp, 0, sizeof(cmd->resp));
+ cmd->retries = retries;
+
+ mrq.cmd = cmd;
+ cmd->data = NULL;
+
+ mmc_wait_for_req(host, &mrq);
+
+ return cmd->error;
+}
+
+EXPORT_SYMBOL(mmc_wait_for_cmd);
+
+
+
+/**
+ * __mmc_claim_host - exclusively claim a host
+ * @host: mmc host to claim
+ * @card: mmc card to claim host for
+ *
+ * Claim a host for a set of operations. If a valid card
+ * is passed and this wasn't the last card selected, select
+ * the card before returning.
+ *
+ * Note: you should use mmc_card_claim_host or mmc_claim_host.
+ */
+int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long flags;
+ int err = 0;
+
+ add_wait_queue(&host->wq, &wait);
+ spin_lock_irqsave(&host->lock, flags);
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (host->card_busy == NULL)
+ break;
+ spin_unlock_irqrestore(&host->lock, flags);
+ schedule();
+ spin_lock_irqsave(&host->lock, flags);
+ }
+ set_current_state(TASK_RUNNING);
+ host->card_busy = card;
+ spin_unlock_irqrestore(&host->lock, flags);
+ remove_wait_queue(&host->wq, &wait);
+
+ if (card != (void *)-1 && host->card_selected != card) {
+ struct mmc_command cmd;
+
+ host->card_selected = card;
+
+ cmd.opcode = MMC_SELECT_CARD;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1;
+
+ err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+ }
+
+ return err;
+}
+
+EXPORT_SYMBOL(__mmc_claim_host);
+
+/**
+ * mmc_release_host - release a host
+ * @host: mmc host to release
+ *
+ * Release a MMC host, allowing others to claim the host
+ * for their operations.
+ */
+void mmc_release_host(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ BUG_ON(host->card_busy == NULL);
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->card_busy = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ wake_up(&host->wq);
+}
+
+EXPORT_SYMBOL(mmc_release_host);
+
+/*
+ * Ensure that no card is selected.
+ */
+static void mmc_deselect_cards(struct mmc_host *host)
+{
+ struct mmc_command cmd;
+
+ if (host->card_selected) {
+ host->card_selected = NULL;
+
+ cmd.opcode = MMC_SELECT_CARD;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_NONE;
+
+ mmc_wait_for_cmd(host, &cmd, 0);
+ }
+}
+
+
+static inline void mmc_delay(unsigned int ms)
+{
+ if (ms < HZ / 1000) {
+ yield();
+ mdelay(ms);
+ } else {
+ msleep_interruptible (ms);
+ }
+}
+
+/*
+ * Mask off any voltages we don't support and select
+ * the lowest voltage
+ */
+static u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
+{
+ int bit;
+
+ ocr &= host->ocr_avail;
+
+ bit = ffs(ocr);
+ if (bit) {
+ bit -= 1;
+
+ ocr = 3 << bit;
+
+ host->ios.vdd = bit;
+ host->ops->set_ios(host, &host->ios);
+ } else {
+ ocr = 0;
+ }
+
+ return ocr;
+}
+
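/*
 * Worked example (annotation; not part of the original patch): with a
 * typical OCR mask of 0x00ff8000 (roughly 2.7V-3.6V) and an unrestricted
 * host->ocr_avail, ffs() returns 16, so bit = 15, the selected OCR
 * becomes 3 << 15 = 0x18000 (the lowest supported voltage window plus
 * the one above it), and host->ios.vdd is set to 15.
 */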
+#define UNSTUFF_BITS(resp,start,size) \
+ ({ \
+ const int __size = size; \
+ const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
+ const int __off = 3 - ((start) / 32); \
+ const int __shft = (start) & 31; \
+ u32 __res; \
+ \
+ __res = resp[__off] >> __shft; \
+ if (__size + __shft > 32) \
+ __res |= resp[__off-1] << ((32 - __shft) % 32); \
+ __res & __mask; \
+ })
+
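/*
 * Worked example (annotation; not part of the original patch): for
 * UNSTUFF_BITS(resp, 120, 8), the v2.x manufacturer ID below, the macro
 * computes __off = 3 - 120/32 = 0 and __shft = 120 & 31 = 24, so the
 * result is (resp[0] >> 24) & 0xff, i.e. the top byte of the first
 * response word.  CID/CSD bits are numbered down from 127, with resp[0]
 * holding bits 127:96.
 */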
+/*
+ * Given the decoded CSD structure, decode the raw CID to our CID structure.
+ */
+static void mmc_decode_cid(struct mmc_card *card)
+{
+ u32 *resp = card->raw_cid;
+
+ memset(&card->cid, 0, sizeof(struct mmc_cid));
+
+ /*
+ * The selection of the format here is guesswork based upon
+ * information people have sent to date.
+ */
+ switch (card->csd.mmca_vsn) {
+ case 0: /* MMC v1.? */
+ case 1: /* MMC v1.4 */
+ card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
+ card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
+ card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
+ card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
+ card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
+ card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
+ card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
+ card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
+ card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
+ card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
+ card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
+ card->cid.month = UNSTUFF_BITS(resp, 12, 4);
+ card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
+ break;
+
+ case 2: /* MMC v2.x ? */
+ case 3: /* MMC v3.x ? */
+ card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
+ card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
+ card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
+ card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
+ card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
+ card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
+ card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
+ card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
+ card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
+ card->cid.month = UNSTUFF_BITS(resp, 12, 4);
+ card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
+ break;
+
+ default:
+ printk("%s: card has unknown MMCA version %d\n",
+ card->host->host_name, card->csd.mmca_vsn);
+ mmc_card_set_bad(card);
+ break;
+ }
+}
+
+/*
+ * Given a 128-bit response, decode to our card CSD structure.
+ */
+static void mmc_decode_csd(struct mmc_card *card)
+{
+ struct mmc_csd *csd = &card->csd;
+ unsigned int e, m, csd_struct;
+ u32 *resp = card->raw_csd;
+
+ /*
+ * We only understand CSD structure v1.1 and v2.
+ * v2 has extra information in bits 15, 11 and 10.
+ */
+ csd_struct = UNSTUFF_BITS(resp, 126, 2);
+ if (csd_struct != 1 && csd_struct != 2) {
+ printk("%s: unrecognised CSD structure version %d\n",
+ card->host->host_name, csd_struct);
+ mmc_card_set_bad(card);
+ return;
+ }
+
+ csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
+ m = UNSTUFF_BITS(resp, 115, 4);
+ e = UNSTUFF_BITS(resp, 112, 3);
+ csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
+ csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
+
+ m = UNSTUFF_BITS(resp, 99, 4);
+ e = UNSTUFF_BITS(resp, 96, 3);
+ csd->max_dtr = tran_exp[e] * tran_mant[m];
+ csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
+
+ e = UNSTUFF_BITS(resp, 47, 3);
+ m = UNSTUFF_BITS(resp, 62, 12);
+ csd->capacity = (1 + m) << (e + 2);
+
+ csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
+}
+
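/*
 * Worked example (annotation; not part of the original patch): with
 * C_SIZE = 2047 (bits 73:62), C_SIZE_MULT = 7 (bits 49:47) and
 * READ_BL_LEN = 9 (bits 83:80), csd->capacity = (1 + 2047) << (7 + 2) =
 * 1048576 blocks of 1 << 9 = 512 bytes each; mmc_block.c below reports
 * this as (capacity << read_blkbits) / 1024 = 524288 KiB.
 */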
+/*
+ * Locate a MMC card on this MMC host given a raw CID.
+ */
+static struct mmc_card *mmc_find_card(struct mmc_host *host, u32 *raw_cid)
+{
+ struct mmc_card *card;
+
+ list_for_each_entry(card, &host->cards, node) {
+ if (memcmp(card->raw_cid, raw_cid, sizeof(card->raw_cid)) == 0)
+ return card;
+ }
+ return NULL;
+}
+
+/*
+ * Allocate a new MMC card, and assign a unique RCA.
+ */
+static struct mmc_card *
+mmc_alloc_card(struct mmc_host *host, u32 *raw_cid, unsigned int *frca)
+{
+ struct mmc_card *card, *c;
+ unsigned int rca = *frca;
+
+ card = kmalloc(sizeof(struct mmc_card), GFP_KERNEL);
+ if (!card)
+ return ERR_PTR(-ENOMEM);
+
+ mmc_init_card(card, host);
+ memcpy(card->raw_cid, raw_cid, sizeof(card->raw_cid));
+
+ again:
+ list_for_each_entry(c, &host->cards, node)
+ if (c->rca == rca) {
+ rca++;
+ goto again;
+ }
+
+ card->rca = rca;
+
+ *frca = rca;
+
+ return card;
+}
+
+/*
+ * Tell attached cards to go to IDLE state
+ */
+static void mmc_idle_cards(struct mmc_host *host)
+{
+ struct mmc_command cmd;
+
+ cmd.opcode = MMC_GO_IDLE_STATE;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_NONE;
+
+ mmc_wait_for_cmd(host, &cmd, 0);
+
+ mmc_delay(1);
+}
+
+/*
+ * Apply power to the MMC stack.
+ */
+static void mmc_power_up(struct mmc_host *host)
+{
+ int bit = fls(host->ocr_avail) - 1;
+
+ host->ios.vdd = bit;
+ host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+ host->ios.power_mode = MMC_POWER_UP;
+ host->ops->set_ios(host, &host->ios);
+
+ mmc_delay(1);
+
+ host->ios.clock = host->f_min;
+ host->ios.power_mode = MMC_POWER_ON;
+ host->ops->set_ios(host, &host->ios);
+
+ mmc_delay(2);
+}
+
+static void mmc_power_off(struct mmc_host *host)
+{
+ host->ios.clock = 0;
+ host->ios.vdd = 0;
+ host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+ host->ios.power_mode = MMC_POWER_OFF;
+ host->ops->set_ios(host, &host->ios);
+}
+
+static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
+{
+ struct mmc_command cmd;
+ int i, err = 0;
+
+ cmd.opcode = MMC_SEND_OP_COND;
+ cmd.arg = ocr;
+ cmd.flags = MMC_RSP_R3;
+
+ for (i = 100; i; i--) {
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err != MMC_ERR_NONE)
+ break;
+
+ if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0)
+ break;
+
+ err = MMC_ERR_TIMEOUT;
+
+ mmc_delay(10);
+ }
+
+ if (rocr)
+ *rocr = cmd.resp[0];
+
+ return err;
+}
+
+/*
+ * Discover cards by requesting their CID. If this command
+ * times out, it is not an error; there are no further cards
+ * to be discovered. Add new cards to the list.
+ *
+ * Create a mmc_card entry for each discovered card, assigning
+ * it an RCA, and save the raw CID for decoding later.
+ */
+static void mmc_discover_cards(struct mmc_host *host)
+{
+ struct mmc_card *card;
+ unsigned int first_rca = 1, err;
+
+ while (1) {
+ struct mmc_command cmd;
+
+ cmd.opcode = MMC_ALL_SEND_CID;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R2;
+
+ err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+ if (err == MMC_ERR_TIMEOUT) {
+ err = MMC_ERR_NONE;
+ break;
+ }
+ if (err != MMC_ERR_NONE) {
+ printk(KERN_ERR "%s: error requesting CID: %d\n",
+ host->host_name, err);
+ break;
+ }
+
+ card = mmc_find_card(host, cmd.resp);
+ if (!card) {
+ card = mmc_alloc_card(host, cmd.resp, &first_rca);
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ break;
+ }
+ list_add(&card->node, &host->cards);
+ }
+
+ card->state &= ~MMC_STATE_DEAD;
+
+ cmd.opcode = MMC_SET_RELATIVE_ADDR;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1;
+
+ err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+ if (err != MMC_ERR_NONE)
+ mmc_card_set_dead(card);
+ }
+}
+
+static void mmc_read_csds(struct mmc_host *host)
+{
+ struct mmc_card *card;
+
+ list_for_each_entry(card, &host->cards, node) {
+ struct mmc_command cmd;
+ int err;
+
+ if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT))
+ continue;
+
+ cmd.opcode = MMC_SEND_CSD;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R2;
+
+ err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+ if (err != MMC_ERR_NONE) {
+ mmc_card_set_dead(card);
+ continue;
+ }
+
+ memcpy(card->raw_csd, cmd.resp, sizeof(card->raw_csd));
+
+ mmc_decode_csd(card);
+ mmc_decode_cid(card);
+ }
+}
+
+static unsigned int mmc_calculate_clock(struct mmc_host *host)
+{
+ struct mmc_card *card;
+ unsigned int max_dtr = host->f_max;
+
+ list_for_each_entry(card, &host->cards, node)
+ if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr)
+ max_dtr = card->csd.max_dtr;
+
+ DBG("MMC: selected %d.%03dMHz transfer rate\n",
+ max_dtr / 1000000, (max_dtr / 1000) % 1000);
+
+ return max_dtr;
+}
+
+/*
+ * Check whether cards we already know about are still present.
+ * We do this by requesting status, and checking whether a card
+ * responds.
+ *
+ * A request for status does not cause a state change in data
+ * transfer mode.
+ */
+static void mmc_check_cards(struct mmc_host *host)
+{
+ struct list_head *l, *n;
+
+ mmc_deselect_cards(host);
+
+ list_for_each_safe(l, n, &host->cards) {
+ struct mmc_card *card = mmc_list_to_card(l);
+ struct mmc_command cmd;
+ int err;
+
+ cmd.opcode = MMC_SEND_STATUS;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1;
+
+ err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+ if (err == MMC_ERR_NONE)
+ continue;
+
+ mmc_card_set_dead(card);
+ }
+}
+
+static void mmc_setup(struct mmc_host *host)
+{
+ if (host->ios.power_mode != MMC_POWER_ON) {
+ int err;
+ u32 ocr;
+
+ mmc_power_up(host);
+ mmc_idle_cards(host);
+
+ err = mmc_send_op_cond(host, 0, &ocr);
+ if (err != MMC_ERR_NONE)
+ return;
+
+ host->ocr = mmc_select_voltage(host, ocr);
+
+ /*
+ * Since we're changing the OCR value, we seem to
+ * need to tell some cards to go back to the idle
+ * state. We wait 1ms to give cards time to
+ * respond.
+ */
+ if (host->ocr)
+ mmc_idle_cards(host);
+ } else {
+ host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+ host->ios.clock = host->f_min;
+ host->ops->set_ios(host, &host->ios);
+
+ /*
+ * We should remember the OCR mask from the existing
+ * cards, and detect the new cards OCR mask, combine
+ * the two and re-select the VDD. However, if we do
+ * change VDD, we should do an idle, and then do a
+ * full re-initialisation. We would need to notify
+ * drivers so that they can re-setup the cards as
+ * well, while keeping their queues at bay.
+ *
+ * For the moment, we take the easy way out - if the
+ * new cards don't like our currently selected VDD,
+ * they drop off the bus.
+ */
+ }
+
+ if (host->ocr == 0)
+ return;
+
+ /*
+ * Send the selected OCR multiple times... until the cards
+ * all get the idea that they should be ready for CMD2.
+ * (My SanDisk card seems to need this.)
+ */
+ mmc_send_op_cond(host, host->ocr, NULL);
+
+ mmc_discover_cards(host);
+
+ /*
+ * Ok, now switch to push-pull mode.
+ */
+ host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+ host->ops->set_ios(host, &host->ios);
+
+ mmc_read_csds(host);
+}
+
+
+/**
+ * mmc_detect_change - process change of state on a MMC socket
+ * @host: host which changed state.
+ *
+ * All we know is that card(s) have been inserted or removed
+ * from the socket(s). We don't know which socket or cards.
+ */
+void mmc_detect_change(struct mmc_host *host)
+{
+ schedule_work(&host->detect);
+}
+
+EXPORT_SYMBOL(mmc_detect_change);
+
+
+static void mmc_rescan(void *data)
+{
+ struct mmc_host *host = data;
+ struct list_head *l, *n;
+
+ mmc_claim_host(host);
+
+ if (host->ios.power_mode == MMC_POWER_ON)
+ mmc_check_cards(host);
+
+ mmc_setup(host);
+
+ if (!list_empty(&host->cards)) {
+ /*
+ * (Re-)calculate the fastest clock rate which the
+ * attached cards and the host support.
+ */
+ host->ios.clock = mmc_calculate_clock(host);
+ host->ops->set_ios(host, &host->ios);
+ }
+
+ mmc_release_host(host);
+
+ list_for_each_safe(l, n, &host->cards) {
+ struct mmc_card *card = mmc_list_to_card(l);
+
+ /*
+ * If this is a new and good card, register it.
+ */
+ if (!mmc_card_present(card) && !mmc_card_dead(card)) {
+ if (mmc_register_card(card))
+ mmc_card_set_dead(card);
+ else
+ mmc_card_set_present(card);
+ }
+
+ /*
+ * If this card is dead, destroy it.
+ */
+ if (mmc_card_dead(card)) {
+ list_del(&card->node);
+ mmc_remove_card(card);
+ }
+ }
+
+ /*
+ * If we discover that there are no cards on the
+ * bus, turn off the clock and power down.
+ */
+ if (list_empty(&host->cards))
+ mmc_power_off(host);
+}
+
+
+/**
+ * mmc_alloc_host - initialise the per-host structure.
+ * @extra: sizeof private data structure
+ * @dev: pointer to host device model structure
+ *
+ * Initialise the per-host structure.
+ */
+struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
+{
+ struct mmc_host *host;
+
+ host = kmalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
+ if (host) {
+ memset(host, 0, sizeof(struct mmc_host) + extra);
+
+ spin_lock_init(&host->lock);
+ init_waitqueue_head(&host->wq);
+ INIT_LIST_HEAD(&host->cards);
+ INIT_WORK(&host->detect, mmc_rescan, host);
+
+ host->dev = dev;
+
+ /*
+ * By default, hosts do not support SGIO or large requests.
+ * They have to set these according to their abilities.
+ */
+ host->max_hw_segs = 1;
+ host->max_phys_segs = 1;
+ host->max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+ host->max_seg_size = PAGE_CACHE_SIZE;
+ }
+
+ return host;
+}
+
+EXPORT_SYMBOL(mmc_alloc_host);
+
+/**
+ * mmc_add_host - initialise host hardware
+ * @host: mmc host
+ */
+int mmc_add_host(struct mmc_host *host)
+{
+ static unsigned int host_num;
+
+ snprintf(host->host_name, sizeof(host->host_name),
+ "mmc%d", host_num++);
+
+ mmc_power_off(host);
+ mmc_detect_change(host);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(mmc_add_host);
+
+/**
+ * mmc_remove_host - remove host hardware
+ * @host: mmc host
+ *
+ * Unregister and remove all cards associated with this host,
+ * and power down the MMC bus.
+ */
+void mmc_remove_host(struct mmc_host *host)
+{
+ struct list_head *l, *n;
+
+ list_for_each_safe(l, n, &host->cards) {
+ struct mmc_card *card = mmc_list_to_card(l);
+
+ mmc_remove_card(card);
+ }
+
+ mmc_power_off(host);
+}
+
+EXPORT_SYMBOL(mmc_remove_host);
+
+/**
+ * mmc_free_host - free the host structure
+ * @host: mmc host
+ *
+ * Free the host once all references to it have been dropped.
+ */
+void mmc_free_host(struct mmc_host *host)
+{
+ flush_scheduled_work();
+ kfree(host);
+}
+
+EXPORT_SYMBOL(mmc_free_host);
+
+#ifdef CONFIG_PM
+
+/**
+ * mmc_suspend_host - suspend a host
+ * @host: mmc host
+ * @state: suspend mode (PM_SUSPEND_xxx)
+ */
+int mmc_suspend_host(struct mmc_host *host, u32 state)
+{
+ mmc_claim_host(host);
+ mmc_deselect_cards(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(mmc_suspend_host);
+
+/**
+ * mmc_resume_host - resume a previously suspended host
+ * @host: mmc host
+ */
+int mmc_resume_host(struct mmc_host *host)
+{
+ mmc_detect_change(host);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(mmc_resume_host);
+
+#endif
+
+MODULE_LICENSE("GPL");
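The core above is only half of the picture; it expects a host controller driver to plug in through mmc_alloc_host()/mmc_add_host() and a struct mmc_host_ops. The sketch below shows that contract in its smallest form. The "myhost" names, the f_max value and the OCR mask are assumptions made for illustration; only the mmc_* calls and mmc_host fields come from this patch (mmci.c further down is the real example).

/* myhost.c - minimal host controller driver sketch (hypothetical) */
#include <linux/device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>

struct myhost {
	struct mmc_host	*mmc;
	/* controller registers, IRQ state, ... */
};

static void myhost_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct myhost *host = mmc_priv(mmc);

	/*
	 * Program the controller from mrq->cmd (and mrq->data, if any);
	 * when the transfer finishes (usually in the IRQ handler), hand
	 * the request back to the core:
	 */
	(void)host;
	mmc_request_done(mmc, mrq);
}

static void myhost_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	/* Apply ios->clock, ios->vdd, ios->bus_mode and ios->power_mode. */
}

static struct mmc_host_ops myhost_ops = {
	.request	= myhost_request,
	.set_ios	= myhost_set_ios,
};

static int myhost_probe(struct device *dev)
{
	struct mmc_host *mmc;

	mmc = mmc_alloc_host(sizeof(struct myhost), dev);
	if (!mmc)
		return -ENOMEM;

	mmc->ops = &myhost_ops;
	mmc->f_min = 400000;		/* identification-phase clock */
	mmc->f_max = 20000000;		/* controller limit (assumed) */
	mmc->ocr_avail = 0x00300000;	/* 3.2V-3.4V supply windows */

	/* Powers the bus off and schedules the first mmc_rescan(). */
	return mmc_add_host(mmc);
}

mmc_remove_host() and mmc_free_host() undo this on the removal path; mmci.c below follows the same shape with real hardware behind it.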
diff --git a/drivers/mmc/mmc.h b/drivers/mmc/mmc.h
new file mode 100644
index 00000000000..b498dffe0b1
--- /dev/null
+++ b/drivers/mmc/mmc.h
@@ -0,0 +1,16 @@
+/*
+ * linux/drivers/mmc/mmc.h
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _MMC_H
+#define _MMC_H
+/* core-internal functions */
+void mmc_init_card(struct mmc_card *card, struct mmc_host *host);
+int mmc_register_card(struct mmc_card *card);
+void mmc_remove_card(struct mmc_card *card);
+#endif
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
new file mode 100644
index 00000000000..b5b4a7b1190
--- /dev/null
+++ b/drivers/mmc/mmc_block.c
@@ -0,0 +1,509 @@
+/*
+ * Block driver for media (i.e., flash cards)
+ *
+ * Copyright 2002 Hewlett-Packard Company
+ *
+ * Use consistent with the GNU GPL is permitted,
+ * provided that this copyright notice is
+ * preserved in its entirety in all copies and derived works.
+ *
+ * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
+ * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
+ * FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ * Many thanks to Alessandro Rubini and Jonathan Corbet!
+ *
+ * Author: Andrew Christian
+ * 28 May 2002
+ */
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/devfs_fs_kernel.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/protocol.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include "mmc_queue.h"
+
+/*
+ * max 8 partitions per card
+ */
+#define MMC_SHIFT 3
+
+static int major;
+
+/*
+ * There is one mmc_blk_data per slot.
+ */
+struct mmc_blk_data {
+ spinlock_t lock;
+ struct gendisk *disk;
+ struct mmc_queue queue;
+
+ unsigned int usage;
+ unsigned int block_bits;
+};
+
+static DECLARE_MUTEX(open_lock);
+
+static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
+{
+ struct mmc_blk_data *md;
+
+ down(&open_lock);
+ md = disk->private_data;
+ if (md && md->usage == 0)
+ md = NULL;
+ if (md)
+ md->usage++;
+ up(&open_lock);
+
+ return md;
+}
+
+static void mmc_blk_put(struct mmc_blk_data *md)
+{
+ down(&open_lock);
+ md->usage--;
+ if (md->usage == 0) {
+ put_disk(md->disk);
+ mmc_cleanup_queue(&md->queue);
+ kfree(md);
+ }
+ up(&open_lock);
+}
+
+static int mmc_blk_open(struct inode *inode, struct file *filp)
+{
+ struct mmc_blk_data *md;
+ int ret = -ENXIO;
+
+ md = mmc_blk_get(inode->i_bdev->bd_disk);
+ if (md) {
+ if (md->usage == 2)
+ check_disk_change(inode->i_bdev);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int mmc_blk_release(struct inode *inode, struct file *filp)
+{
+ struct mmc_blk_data *md = inode->i_bdev->bd_disk->private_data;
+
+ mmc_blk_put(md);
+ return 0;
+}
+
+static int
+mmc_blk_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct block_device *bdev = inode->i_bdev;
+
+ if (cmd == HDIO_GETGEO) {
+ struct hd_geometry geo;
+
+ memset(&geo, 0, sizeof(struct hd_geometry));
+
+ geo.cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+ geo.heads = 4;
+ geo.sectors = 16;
+ geo.start = get_start_sect(bdev);
+
+ return copy_to_user((void __user *)arg, &geo, sizeof(geo))
+ ? -EFAULT : 0;
+ }
+
+ return -ENOTTY;
+}
+
+static struct block_device_operations mmc_bdops = {
+ .open = mmc_blk_open,
+ .release = mmc_blk_release,
+ .ioctl = mmc_blk_ioctl,
+ .owner = THIS_MODULE,
+};
+
+struct mmc_blk_request {
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+};
+
+static int mmc_blk_prep_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ int stat = BLKPREP_OK;
+
+ /*
+ * If we have no device, we haven't finished initialising.
+ */
+ if (!md || !mq->card) {
+ printk(KERN_ERR "%s: killing request - no device/host\n",
+ req->rq_disk->disk_name);
+ stat = BLKPREP_KILL;
+ }
+
+ return stat;
+}
+
+static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ int ret;
+
+ if (mmc_card_claim_host(card))
+ goto cmd_err;
+
+ do {
+ struct mmc_blk_request brq;
+ struct mmc_command cmd;
+
+ memset(&brq, 0, sizeof(struct mmc_blk_request));
+ brq.mrq.cmd = &brq.cmd;
+ brq.mrq.data = &brq.data;
+
+ brq.cmd.arg = req->sector << 9;
+ brq.cmd.flags = MMC_RSP_R1;
+ brq.data.timeout_ns = card->csd.tacc_ns * 10;
+ brq.data.timeout_clks = card->csd.tacc_clks * 10;
+ brq.data.blksz_bits = md->block_bits;
+ brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
+ brq.stop.opcode = MMC_STOP_TRANSMISSION;
+ brq.stop.arg = 0;
+ brq.stop.flags = MMC_RSP_R1B;
+
+ if (rq_data_dir(req) == READ) {
+ brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
+ brq.data.flags |= MMC_DATA_READ;
+ } else {
+ brq.cmd.opcode = MMC_WRITE_BLOCK;
+ brq.cmd.flags = MMC_RSP_R1B;
+ brq.data.flags |= MMC_DATA_WRITE;
+ brq.data.blocks = 1;
+ }
+ brq.mrq.stop = brq.data.blocks > 1 ? &brq.stop : NULL;
+
+ brq.data.sg = mq->sg;
+ brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);
+
+ mmc_wait_for_req(card->host, &brq.mrq);
+ if (brq.cmd.error) {
+ printk(KERN_ERR "%s: error %d sending read/write command\n",
+ req->rq_disk->disk_name, brq.cmd.error);
+ goto cmd_err;
+ }
+
+ if (brq.data.error) {
+ printk(KERN_ERR "%s: error %d transferring data\n",
+ req->rq_disk->disk_name, brq.data.error);
+ goto cmd_err;
+ }
+
+ if (brq.stop.error) {
+ printk(KERN_ERR "%s: error %d sending stop command\n",
+ req->rq_disk->disk_name, brq.stop.error);
+ goto cmd_err;
+ }
+
+ do {
+ int err;
+
+ cmd.opcode = MMC_SEND_STATUS;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1;
+ err = mmc_wait_for_cmd(card->host, &cmd, 5);
+ if (err) {
+ printk(KERN_ERR "%s: error %d requesting status\n",
+ req->rq_disk->disk_name, err);
+ goto cmd_err;
+ }
+ } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
+
+#if 0
+ if (cmd.resp[0] & ~0x00000900)
+ printk(KERN_ERR "%s: status = %08x\n",
+ req->rq_disk->disk_name, cmd.resp[0]);
+ if (mmc_decode_status(cmd.resp))
+ goto cmd_err;
+#endif
+
+ /*
+ * A block was successfully transferred.
+ */
+ spin_lock_irq(&md->lock);
+ ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
+ if (!ret) {
+ /*
+ * The whole request completed successfully.
+ */
+ add_disk_randomness(req->rq_disk);
+ blkdev_dequeue_request(req);
+ end_that_request_last(req);
+ }
+ spin_unlock_irq(&md->lock);
+ } while (ret);
+
+ mmc_card_release_host(card);
+
+ return 1;
+
+ cmd_err:
+ mmc_card_release_host(card);
+
+ /*
+ * This is a little draconian, but until we get proper
+ * error handling sorted out here, it's the best we can
+ * do - especially as some hosts have no idea how much
+ * data was transferred before the error occurred.
+ */
+ spin_lock_irq(&md->lock);
+ do {
+ ret = end_that_request_chunk(req, 0,
+ req->current_nr_sectors << 9);
+ } while (ret);
+
+ add_disk_randomness(req->rq_disk);
+ blkdev_dequeue_request(req);
+ end_that_request_last(req);
+ spin_unlock_irq(&md->lock);
+
+ return 0;
+}
+
+#define MMC_NUM_MINORS (256 >> MMC_SHIFT)
+
+static unsigned long dev_use[MMC_NUM_MINORS/(8*sizeof(unsigned long))];
+
+static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+{
+ struct mmc_blk_data *md;
+ int devidx, ret;
+
+ devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
+ if (devidx >= MMC_NUM_MINORS)
+ return ERR_PTR(-ENOSPC);
+ __set_bit(devidx, dev_use);
+
+ md = kmalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
+ if (md) {
+ memset(md, 0, sizeof(struct mmc_blk_data));
+
+ md->disk = alloc_disk(1 << MMC_SHIFT);
+ if (md->disk == NULL) {
+ kfree(md);
+ md = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ spin_lock_init(&md->lock);
+ md->usage = 1;
+
+ ret = mmc_init_queue(&md->queue, card, &md->lock);
+ if (ret) {
+ put_disk(md->disk);
+ kfree(md);
+ md = ERR_PTR(ret);
+ goto out;
+ }
+ md->queue.prep_fn = mmc_blk_prep_rq;
+ md->queue.issue_fn = mmc_blk_issue_rq;
+ md->queue.data = md;
+
+ md->disk->major = major;
+ md->disk->first_minor = devidx << MMC_SHIFT;
+ md->disk->fops = &mmc_bdops;
+ md->disk->private_data = md;
+ md->disk->queue = md->queue.queue;
+ md->disk->driverfs_dev = &card->dev;
+
+ /*
+ * As discussed on lkml, GENHD_FL_REMOVABLE should:
+ *
+ * - be set for removable media with permanent block devices
+ * - be unset for removable block devices with permanent media
+ *
+ * Since MMC block devices clearly fall under the second
+ * case, we do not set GENHD_FL_REMOVABLE. Userspace
+ * should use the block device creation/destruction hotplug
+ * messages to tell when the card is present.
+ */
+
+ sprintf(md->disk->disk_name, "mmcblk%d", devidx);
+ sprintf(md->disk->devfs_name, "mmc/blk%d", devidx);
+
+ md->block_bits = card->csd.read_blkbits;
+
+ blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);
+ set_capacity(md->disk, card->csd.capacity);
+ }
+ out:
+ return md;
+}
+
+static int
+mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
+{
+ struct mmc_command cmd;
+ int err;
+
+ mmc_card_claim_host(card);
+ cmd.opcode = MMC_SET_BLOCKLEN;
+ cmd.arg = 1 << card->csd.read_blkbits;
+ cmd.flags = MMC_RSP_R1;
+ err = mmc_wait_for_cmd(card->host, &cmd, 5);
+ mmc_card_release_host(card);
+
+ if (err) {
+ printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
+ md->disk->disk_name, cmd.arg, err);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mmc_blk_probe(struct mmc_card *card)
+{
+ struct mmc_blk_data *md;
+ int err;
+
+ if (card->csd.cmdclass & ~0x1ff)
+ return -ENODEV;
+
+ if (card->csd.read_blkbits < 9) {
+ printk(KERN_WARNING "%s: read blocksize too small (%u)\n",
+ mmc_card_id(card), 1 << card->csd.read_blkbits);
+ return -ENODEV;
+ }
+
+ md = mmc_blk_alloc(card);
+ if (IS_ERR(md))
+ return PTR_ERR(md);
+
+ err = mmc_blk_set_blksize(md, card);
+ if (err)
+ goto out;
+
+ printk(KERN_INFO "%s: %s %s %dKiB\n",
+ md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
+ (card->csd.capacity << card->csd.read_blkbits) / 1024);
+
+ mmc_set_drvdata(card, md);
+ add_disk(md->disk);
+ return 0;
+
+ out:
+ mmc_blk_put(md);
+
+ return err;
+}
+
+static void mmc_blk_remove(struct mmc_card *card)
+{
+ struct mmc_blk_data *md = mmc_get_drvdata(card);
+
+ if (md) {
+ int devidx;
+
+ del_gendisk(md->disk);
+
+ /*
+ * I think this is needed.
+ */
+ md->disk->queue = NULL;
+
+ devidx = md->disk->first_minor >> MMC_SHIFT;
+ __clear_bit(devidx, dev_use);
+
+ mmc_blk_put(md);
+ }
+ mmc_set_drvdata(card, NULL);
+}
+
+#ifdef CONFIG_PM
+static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
+{
+ struct mmc_blk_data *md = mmc_get_drvdata(card);
+
+ if (md) {
+ mmc_queue_suspend(&md->queue);
+ }
+ return 0;
+}
+
+static int mmc_blk_resume(struct mmc_card *card)
+{
+ struct mmc_blk_data *md = mmc_get_drvdata(card);
+
+ if (md) {
+ mmc_blk_set_blksize(md, card);
+ mmc_queue_resume(&md->queue);
+ }
+ return 0;
+}
+#else
+#define mmc_blk_suspend NULL
+#define mmc_blk_resume NULL
+#endif
+
+static struct mmc_driver mmc_driver = {
+ .drv = {
+ .name = "mmcblk",
+ },
+ .probe = mmc_blk_probe,
+ .remove = mmc_blk_remove,
+ .suspend = mmc_blk_suspend,
+ .resume = mmc_blk_resume,
+};
+
+static int __init mmc_blk_init(void)
+{
+ int res = -ENOMEM;
+
+ res = register_blkdev(major, "mmc");
+ if (res < 0) {
+ printk(KERN_WARNING "Unable to get major %d for MMC media: %d\n",
+ major, res);
+ goto out;
+ }
+ if (major == 0)
+ major = res;
+
+ devfs_mk_dir("mmc");
+ return mmc_register_driver(&mmc_driver);
+
+ out:
+ return res;
+}
+
+static void __exit mmc_blk_exit(void)
+{
+ mmc_unregister_driver(&mmc_driver);
+ devfs_remove("mmc");
+ unregister_blkdev(major, "mmc");
+}
+
+module_init(mmc_blk_init);
+module_exit(mmc_blk_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
+
+module_param(major, int, 0444);
+MODULE_PARM_DESC(major, "specify the major device number for MMC block driver");
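To make the read/write translation in mmc_blk_issue_rq() concrete, here is the arithmetic for one hypothetical request, a 4 KiB read starting at sector 2048 on a card with 512-byte blocks (block_bits = 9); the numbers follow directly from the code above:

/*
 * req->sector = 2048, req->nr_sectors = 8, md->block_bits = 9
 *
 *   brq.cmd.arg     = 2048 << 9  = 0x00100000  (byte address on the bus)
 *   brq.data.blocks = 8 >> (9-9) = 8           (eight 512-byte blocks)
 *   brq.cmd.opcode  = MMC_READ_MULTIPLE_BLOCK  (since blocks > 1)
 *   brq.mrq.stop    = &brq.stop                (CMD12 ends the open-ended read)
 *
 * A write of the same range goes through the loop one block at a time:
 * blocks is forced to 1 and MMC_WRITE_BLOCK is used, so each pass
 * completes 512 bytes of the request.
 */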
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c
new file mode 100644
index 00000000000..0b9682e9a35
--- /dev/null
+++ b/drivers/mmc/mmc_queue.c
@@ -0,0 +1,238 @@
+/*
+ * linux/drivers/mmc/mmc_queue.c
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/blkdev.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include "mmc_queue.h"
+
+#define MMC_QUEUE_EXIT (1 << 0)
+#define MMC_QUEUE_SUSPENDED (1 << 1)
+
+/*
+ * Prepare a MMC request. Essentially, this means passing the
+ * preparation off to the media driver. The media driver will
+ * create a mmc_io_request in req->special.
+ */
+static int mmc_prep_request(struct request_queue *q, struct request *req)
+{
+ struct mmc_queue *mq = q->queuedata;
+ int ret = BLKPREP_KILL;
+
+ if (req->flags & REQ_SPECIAL) {
+ /*
+ * Special commands already have the command
+ * blocks already setup in req->special.
+ */
+ BUG_ON(!req->special);
+
+ ret = BLKPREP_OK;
+ } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
+ /*
+ * Block I/O requests need translating according
+ * to the protocol.
+ */
+ ret = mq->prep_fn(mq, req);
+ } else {
+ /*
+ * Everything else is invalid.
+ */
+ blk_dump_rq_flags(req, "MMC bad request");
+ }
+
+ if (ret == BLKPREP_OK)
+ req->flags |= REQ_DONTPREP;
+
+ return ret;
+}
+
+static int mmc_queue_thread(void *d)
+{
+ struct mmc_queue *mq = d;
+ struct request_queue *q = mq->queue;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /*
+ * Set iothread to ensure that we aren't put to sleep by
+ * the process freezing. We handle suspension ourselves.
+ */
+ current->flags |= PF_MEMALLOC|PF_NOFREEZE;
+
+ daemonize("mmcqd");
+
+ complete(&mq->thread_complete);
+
+ down(&mq->thread_sem);
+ add_wait_queue(&mq->thread_wq, &wait);
+ do {
+ struct request *req = NULL;
+
+ spin_lock_irq(q->queue_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!blk_queue_plugged(q))
+ mq->req = req = elv_next_request(q);
+ spin_unlock_irq(q->queue_lock);
+
+ if (!req) {
+ if (mq->flags & MMC_QUEUE_EXIT)
+ break;
+ up(&mq->thread_sem);
+ schedule();
+ down(&mq->thread_sem);
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+
+ mq->issue_fn(mq, req);
+ } while (1);
+ remove_wait_queue(&mq->thread_wq, &wait);
+ up(&mq->thread_sem);
+
+ complete_and_exit(&mq->thread_complete, 0);
+ return 0;
+}
+
+/*
+ * Generic MMC request handler. This is called for any queue on a
+ * particular host. When the host is not busy, we look for a request
+ * on any queue on this host, and attempt to issue it. This may
+ * not be the queue we were asked to process.
+ */
+static void mmc_request(request_queue_t *q)
+{
+ struct mmc_queue *mq = q->queuedata;
+
+ if (!mq->req)
+ wake_up(&mq->thread_wq);
+}
+
+/**
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ * @lock: queue lock
+ *
+ * Initialise a MMC card request queue.
+ */
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
+{
+ struct mmc_host *host = card->host;
+ u64 limit = BLK_BOUNCE_HIGH;
+ int ret;
+
+ if (host->dev->dma_mask && *host->dev->dma_mask)
+ limit = *host->dev->dma_mask;
+
+ mq->card = card;
+ mq->queue = blk_init_queue(mmc_request, lock);
+ if (!mq->queue)
+ return -ENOMEM;
+
+ blk_queue_prep_rq(mq->queue, mmc_prep_request);
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_sectors(mq->queue, host->max_sectors);
+ blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
+ blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+ mq->queue->queuedata = mq;
+ mq->req = NULL;
+
+ mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
+ GFP_KERNEL);
+ if (!mq->sg) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ init_completion(&mq->thread_complete);
+ init_waitqueue_head(&mq->thread_wq);
+ init_MUTEX(&mq->thread_sem);
+
+ ret = kernel_thread(mmc_queue_thread, mq, CLONE_KERNEL);
+ if (ret >= 0) {
+ wait_for_completion(&mq->thread_complete);
+ init_completion(&mq->thread_complete);
+ ret = 0;
+ goto out;
+ }
+
+ cleanup:
+ kfree(mq->sg);
+ mq->sg = NULL;
+
+ blk_cleanup_queue(mq->queue);
+ out:
+ return ret;
+}
+EXPORT_SYMBOL(mmc_init_queue);
+
+void mmc_cleanup_queue(struct mmc_queue *mq)
+{
+ mq->flags |= MMC_QUEUE_EXIT;
+ wake_up(&mq->thread_wq);
+ wait_for_completion(&mq->thread_complete);
+
+ kfree(mq->sg);
+ mq->sg = NULL;
+
+ blk_cleanup_queue(mq->queue);
+
+ mq->card = NULL;
+}
+EXPORT_SYMBOL(mmc_cleanup_queue);
+
+/**
+ * mmc_queue_suspend - suspend a MMC request queue
+ * @mq: MMC queue to suspend
+ *
+ * Stop the block request queue, and wait for our thread to
+ * complete any outstanding requests. This ensures that we
+ * won't suspend while a request is being processed.
+ */
+void mmc_queue_suspend(struct mmc_queue *mq)
+{
+ request_queue_t *q = mq->queue;
+ unsigned long flags;
+
+ if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
+ mq->flags |= MMC_QUEUE_SUSPENDED;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ down(&mq->thread_sem);
+ }
+}
+EXPORT_SYMBOL(mmc_queue_suspend);
+
+/**
+ * mmc_queue_resume - resume a previously suspended MMC request queue
+ * @mq: MMC queue to resume
+ */
+void mmc_queue_resume(struct mmc_queue *mq)
+{
+ request_queue_t *q = mq->queue;
+ unsigned long flags;
+
+ if (mq->flags & MMC_QUEUE_SUSPENDED) {
+ mq->flags &= ~MMC_QUEUE_SUSPENDED;
+
+ up(&mq->thread_sem);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+}
+EXPORT_SYMBOL(mmc_queue_resume);
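The intended caller of this queue layer is a media driver such as mmc_block.c above. Condensed to its essentials (the my_* names are hypothetical and the stubs do nothing useful), the wiring looks like this:

#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/mmc/card.h>
#include "mmc_queue.h"

static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
static struct mmc_queue my_queue;

/* Called from the block layer's prep_rq hook via mmc_prep_request(). */
static int my_prep(struct mmc_queue *mq, struct request *req)
{
	return BLKPREP_OK;
}

/* Called from the mmcqd kernel thread for each dequeued request. */
static int my_issue(struct mmc_queue *mq, struct request *req)
{
	/* claim the host, build an mmc_request from req, complete it */
	return 1;
}

static int my_setup(struct mmc_card *card)
{
	int ret = mmc_init_queue(&my_queue, card, &my_lock);
	if (ret)
		return ret;

	my_queue.prep_fn  = my_prep;
	my_queue.issue_fn = my_issue;
	my_queue.data     = NULL;	/* driver-private cookie */
	return 0;
}

In the driver's power-management hooks, mmc_queue_suspend() parks mmcqd before the host goes down and mmc_queue_resume() restarts it afterwards, exactly as mmc_blk_suspend()/mmc_blk_resume() do above.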
diff --git a/drivers/mmc/mmc_queue.h b/drivers/mmc/mmc_queue.h
new file mode 100644
index 00000000000..7182d2f69b4
--- /dev/null
+++ b/drivers/mmc/mmc_queue.h
@@ -0,0 +1,33 @@
+#ifndef MMC_QUEUE_H
+#define MMC_QUEUE_H
+
+struct request;
+struct task_struct;
+
+struct mmc_queue {
+ struct mmc_card *card;
+ struct completion thread_complete;
+ wait_queue_head_t thread_wq;
+ struct semaphore thread_sem;
+ unsigned int flags;
+ struct request *req;
+ int (*prep_fn)(struct mmc_queue *, struct request *);
+ int (*issue_fn)(struct mmc_queue *, struct request *);
+ void *data;
+ struct request_queue *queue;
+ struct scatterlist *sg;
+};
+
+struct mmc_io_request {
+ struct request *rq;
+ int num;
+ struct mmc_command selcmd; /* mmc_queue private */
+ struct mmc_command cmd[4]; /* max 4 commands */
+};
+
+extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
+extern void mmc_cleanup_queue(struct mmc_queue *);
+extern void mmc_queue_suspend(struct mmc_queue *);
+extern void mmc_queue_resume(struct mmc_queue *);
+
+#endif
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
new file mode 100644
index 00000000000..29a56e9cd5b
--- /dev/null
+++ b/drivers/mmc/mmc_sysfs.c
@@ -0,0 +1,238 @@
+/*
+ * linux/drivers/mmc/mmc_sysfs.c
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * MMC sysfs/driver model support.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include "mmc.h"
+
+#define dev_to_mmc_card(d) container_of(d, struct mmc_card, dev)
+#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
+
+#define MMC_ATTR(name, fmt, args...) \
+static ssize_t mmc_##name##_show (struct device *dev, char *buf) \
+{ \
+ struct mmc_card *card = dev_to_mmc_card(dev); \
+ return sprintf(buf, fmt, args); \
+}
+
+MMC_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
+ card->raw_cid[2], card->raw_cid[3]);
+MMC_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
+ card->raw_csd[2], card->raw_csd[3]);
+MMC_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
+MMC_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
+MMC_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
+MMC_ATTR(manfid, "0x%06x\n", card->cid.manfid);
+MMC_ATTR(name, "%s\n", card->cid.prod_name);
+MMC_ATTR(oemid, "0x%04x\n", card->cid.oemid);
+MMC_ATTR(serial, "0x%08x\n", card->cid.serial);
+
+#define MMC_ATTR_RO(name) __ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
+
+static struct device_attribute mmc_dev_attrs[] = {
+ MMC_ATTR_RO(cid),
+ MMC_ATTR_RO(csd),
+ MMC_ATTR_RO(date),
+ MMC_ATTR_RO(fwrev),
+ MMC_ATTR_RO(hwrev),
+ MMC_ATTR_RO(manfid),
+ MMC_ATTR_RO(name),
+ MMC_ATTR_RO(oemid),
+ MMC_ATTR_RO(serial),
+ __ATTR_NULL
+};
+
+
+static void mmc_release_card(struct device *dev)
+{
+ struct mmc_card *card = dev_to_mmc_card(dev);
+
+ kfree(card);
+}
+
+/*
+ * This currently matches any MMC driver to any MMC card - drivers
+ * themselves make the decision whether to drive this card in their
+ * probe method. However, we force "bad" cards to fail.
+ */
+static int mmc_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct mmc_card *card = dev_to_mmc_card(dev);
+ return !mmc_card_bad(card);
+}
+
+static int
+mmc_bus_hotplug(struct device *dev, char **envp, int num_envp, char *buf,
+ int buf_size)
+{
+ struct mmc_card *card = dev_to_mmc_card(dev);
+ char ccc[13];
+ int i = 0;
+
+#define add_env(fmt,val) \
+ ({ \
+ int len, ret = -ENOMEM; \
+ if (i < num_envp) { \
+ envp[i++] = buf; \
+ len = snprintf(buf, buf_size, fmt, val) + 1; \
+ buf_size -= len; \
+ buf += len; \
+ if (buf_size >= 0) \
+ ret = 0; \
+ } \
+ ret; \
+ })
+
+ for (i = 0; i < 12; i++)
+ ccc[i] = card->csd.cmdclass & (1 << i) ? '1' : '0';
+ ccc[12] = '\0';
+
+ i = 0;
+ add_env("MMC_CCC=%s", ccc);
+ add_env("MMC_MANFID=%06x", card->cid.manfid);
+ add_env("MMC_NAME=%s", mmc_card_name(card));
+ add_env("MMC_OEMID=%04x", card->cid.oemid);
+
+ return 0;
+}
+
+static int mmc_bus_suspend(struct device *dev, pm_message_t state)
+{
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
+ struct mmc_card *card = dev_to_mmc_card(dev);
+ int ret = 0;
+
+ if (dev->driver && drv->suspend)
+ ret = drv->suspend(card, state);
+ return ret;
+}
+
+static int mmc_bus_resume(struct device *dev)
+{
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
+ struct mmc_card *card = dev_to_mmc_card(dev);
+ int ret = 0;
+
+ if (dev->driver && drv->resume)
+ ret = drv->resume(card);
+ return ret;
+}
+
+static struct bus_type mmc_bus_type = {
+ .name = "mmc",
+ .dev_attrs = mmc_dev_attrs,
+ .match = mmc_bus_match,
+ .hotplug = mmc_bus_hotplug,
+ .suspend = mmc_bus_suspend,
+ .resume = mmc_bus_resume,
+};
+
+
+static int mmc_drv_probe(struct device *dev)
+{
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
+ struct mmc_card *card = dev_to_mmc_card(dev);
+
+ return drv->probe(card);
+}
+
+static int mmc_drv_remove(struct device *dev)
+{
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
+ struct mmc_card *card = dev_to_mmc_card(dev);
+
+ drv->remove(card);
+
+ return 0;
+}
+
+
+/**
+ * mmc_register_driver - register a media driver
+ * @drv: MMC media driver
+ */
+int mmc_register_driver(struct mmc_driver *drv)
+{
+ drv->drv.bus = &mmc_bus_type;
+ drv->drv.probe = mmc_drv_probe;
+ drv->drv.remove = mmc_drv_remove;
+ return driver_register(&drv->drv);
+}
+
+EXPORT_SYMBOL(mmc_register_driver);
+
+/**
+ * mmc_unregister_driver - unregister a media driver
+ * @drv: MMC media driver
+ */
+void mmc_unregister_driver(struct mmc_driver *drv)
+{
+ drv->drv.bus = &mmc_bus_type;
+ driver_unregister(&drv->drv);
+}
+
+EXPORT_SYMBOL(mmc_unregister_driver);
+
+
+/*
+ * Internal function. Initialise a MMC card structure.
+ */
+void mmc_init_card(struct mmc_card *card, struct mmc_host *host)
+{
+ memset(card, 0, sizeof(struct mmc_card));
+ card->host = host;
+ device_initialize(&card->dev);
+ card->dev.parent = card->host->dev;
+ card->dev.bus = &mmc_bus_type;
+ card->dev.release = mmc_release_card;
+}
+
+/*
+ * Internal function. Register a new MMC card with the driver model.
+ */
+int mmc_register_card(struct mmc_card *card)
+{
+ snprintf(card->dev.bus_id, sizeof(card->dev.bus_id),
+ "%s:%04x", card->host->host_name, card->rca);
+
+ return device_add(&card->dev);
+}
+
+/*
+ * Internal function. Unregister a new MMC card with the
+ * driver model, and (eventually) free it.
+ */
+void mmc_remove_card(struct mmc_card *card)
+{
+ if (mmc_card_present(card))
+ device_del(&card->dev);
+
+ put_device(&card->dev);
+}
+
+
+static int __init mmc_init(void)
+{
+ return bus_register(&mmc_bus_type);
+}
+
+static void __exit mmc_exit(void)
+{
+ bus_unregister(&mmc_bus_type);
+}
+
+module_init(mmc_init);
+module_exit(mmc_exit);
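For reference, each MMC_ATTR() line near the top of this file expands into a small sysfs "show" routine. Written out by hand (the expansion is mechanical, not extra code in the patch), MMC_ATTR(serial, "0x%08x\n", card->cid.serial) becomes:

static ssize_t mmc_serial_show(struct device *dev, char *buf)
{
	struct mmc_card *card = dev_to_mmc_card(dev);
	return sprintf(buf, "0x%08x\n", card->cid.serial);
}

MMC_ATTR_RO(serial) then wraps it in a read-only device attribute, so the value appears as a "serial" file in the card's sysfs directory.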
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
new file mode 100644
index 00000000000..3ee961c206b
--- /dev/null
+++ b/drivers/mmc/mmci.c
@@ -0,0 +1,680 @@
+/*
+ * linux/drivers/mmc/mmci.c - ARM PrimeCell MMCI PL180/1 driver
+ *
+ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/protocol.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/scatterlist.h>
+#include <asm/hardware/amba.h>
+#include <asm/hardware/clock.h>
+#include <asm/mach/mmc.h>
+
+#include "mmci.h"
+
+#define DRIVER_NAME "mmci-pl18x"
+
+#ifdef CONFIG_MMC_DEBUG
+#define DBG(host,fmt,args...) \
+ pr_debug("%s: %s: " fmt, host->mmc->host_name, __func__ , args)
+#else
+#define DBG(host,fmt,args...) do { } while (0)
+#endif
+
+static unsigned int fmax = 515633;
+
+static void
+mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
+{
+ writel(0, host->base + MMCICOMMAND);
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+
+ if (mrq->data)
+ mrq->data->bytes_xfered = host->data_xfered;
+
+ /*
+ * Need to drop the host lock here; mmc_request_done may call
+ * back into the driver...
+ */
+ spin_unlock(&host->lock);
+ mmc_request_done(host->mmc, mrq);
+ spin_lock(&host->lock);
+}
+
+static void mmci_stop_data(struct mmci_host *host)
+{
+ writel(0, host->base + MMCIDATACTRL);
+ writel(0, host->base + MMCIMASK1);
+ host->data = NULL;
+}
+
+static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
+{
+ unsigned int datactrl, timeout, irqmask;
+ void __iomem *base;
+
+ DBG(host, "blksz %04x blks %04x flags %08x\n",
+ 1 << data->blksz_bits, data->blocks, data->flags);
+
+ host->data = data;
+ host->size = data->blocks << data->blksz_bits;
+ host->data_xfered = 0;
+
+ mmci_init_sg(host, data);
+
+ timeout = data->timeout_clks +
+ ((unsigned long long)data->timeout_ns * host->cclk) /
+ 1000000000ULL;
+
+ base = host->base;
+ writel(timeout, base + MMCIDATATIMER);
+ writel(host->size, base + MMCIDATALENGTH);
+
+ datactrl = MCI_DPSM_ENABLE | data->blksz_bits << 4;
+ if (data->flags & MMC_DATA_READ) {
+ datactrl |= MCI_DPSM_DIRECTION;
+ irqmask = MCI_RXFIFOHALFFULLMASK;
+ } else {
+ /*
+ * We don't actually need to include "FIFO empty" here
+ * since its implicit in "FIFO half empty".
+ */
+ irqmask = MCI_TXFIFOHALFEMPTYMASK;
+ }
+
+ writel(datactrl, base + MMCIDATACTRL);
+ writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
+ writel(irqmask, base + MMCIMASK1);
+}
+
+static void
+mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
+{
+ void __iomem *base = host->base;
+
+ DBG(host, "op %02x arg %08x flags %08x\n",
+ cmd->opcode, cmd->arg, cmd->flags);
+
+ if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
+ writel(0, base + MMCICOMMAND);
+ udelay(1);
+ }
+
+ c |= cmd->opcode | MCI_CPSM_ENABLE;
+ switch (cmd->flags & MMC_RSP_MASK) {
+ case MMC_RSP_NONE:
+ default:
+ break;
+ case MMC_RSP_LONG:
+ c |= MCI_CPSM_LONGRSP;
+ case MMC_RSP_SHORT:
+ c |= MCI_CPSM_RESPONSE;
+ break;
+ }
+ if (/*interrupt*/0)
+ c |= MCI_CPSM_INTERRUPT;
+
+ host->cmd = cmd;
+
+ writel(cmd->arg, base + MMCIARGUMENT);
+ writel(c, base + MMCICOMMAND);
+}
+
+static void
+mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
+ unsigned int status)
+{
+ if (status & MCI_DATABLOCKEND) {
+ host->data_xfered += 1 << data->blksz_bits;
+ }
+ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ if (status & MCI_DATACRCFAIL)
+ data->error = MMC_ERR_BADCRC;
+ else if (status & MCI_DATATIMEOUT)
+ data->error = MMC_ERR_TIMEOUT;
+ else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
+ data->error = MMC_ERR_FIFO;
+ status |= MCI_DATAEND;
+ }
+ if (status & MCI_DATAEND) {
+ mmci_stop_data(host);
+
+ if (!data->stop) {
+ mmci_request_end(host, data->mrq);
+ } else {
+ mmci_start_command(host, data->stop, 0);
+ }
+ }
+}
+
+static void
+mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
+ unsigned int status)
+{
+ void __iomem *base = host->base;
+
+ host->cmd = NULL;
+
+ cmd->resp[0] = readl(base + MMCIRESPONSE0);
+ cmd->resp[1] = readl(base + MMCIRESPONSE1);
+ cmd->resp[2] = readl(base + MMCIRESPONSE2);
+ cmd->resp[3] = readl(base + MMCIRESPONSE3);
+
+ if (status & MCI_CMDTIMEOUT) {
+ cmd->error = MMC_ERR_TIMEOUT;
+ } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
+ cmd->error = MMC_ERR_BADCRC;
+ }
+
+ if (!cmd->data || cmd->error != MMC_ERR_NONE) {
+ mmci_request_end(host, cmd->mrq);
+ } else if (!(cmd->data->flags & MMC_DATA_READ)) {
+ mmci_start_data(host, cmd->data);
+ }
+}
+
+static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
+{
+ void __iomem *base = host->base;
+ char *ptr = buffer;
+ u32 status;
+
+ do {
+ int count = host->size - (readl(base + MMCIFIFOCNT) << 2);
+
+ if (count > remain)
+ count = remain;
+
+ if (count <= 0)
+ break;
+
+ readsl(base + MMCIFIFO, ptr, count >> 2);
+
+ ptr += count;
+ remain -= count;
+
+ if (remain == 0)
+ break;
+
+ status = readl(base + MMCISTATUS);
+ } while (status & MCI_RXDATAAVLBL);
+
+ return ptr - buffer;
+}
+
+static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
+{
+ void __iomem *base = host->base;
+ char *ptr = buffer;
+
+ do {
+ unsigned int count, maxcnt;
+
+ maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
+ count = min(remain, maxcnt);
+
+ writesl(base + MMCIFIFO, ptr, count >> 2);
+
+ ptr += count;
+ remain -= count;
+
+ if (remain == 0)
+ break;
+
+ status = readl(base + MMCISTATUS);
+ } while (status & MCI_TXFIFOHALFEMPTY);
+
+ return ptr - buffer;
+}
+
+/*
+ * PIO data transfer IRQ handler.
+ */
+static irqreturn_t mmci_pio_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct mmci_host *host = dev_id;
+ void __iomem *base = host->base;
+ u32 status;
+
+ status = readl(base + MMCISTATUS);
+
+ DBG(host, "irq1 %08x\n", status);
+
+ do {
+ unsigned long flags;
+ unsigned int remain, len;
+ char *buffer;
+
+ /*
+ * For write, we only need to test the half-empty flag
+ * here - if the FIFO is completely empty, then by
+ * definition it is more than half empty.
+ *
+ * For read, check for data available.
+ */
+ if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
+ break;
+
+ /*
+ * Map the current scatter buffer.
+ */
+ buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
+ remain = host->sg_ptr->length - host->sg_off;
+
+ len = 0;
+ if (status & MCI_RXACTIVE)
+ len = mmci_pio_read(host, buffer, remain);
+ if (status & MCI_TXACTIVE)
+ len = mmci_pio_write(host, buffer, remain, status);
+
+ /*
+ * Unmap the buffer.
+ */
+ mmci_kunmap_atomic(host, &flags);
+
+ host->sg_off += len;
+ host->size -= len;
+ remain -= len;
+
+ if (remain)
+ break;
+
+ if (!mmci_next_sg(host))
+ break;
+
+ status = readl(base + MMCISTATUS);
+ } while (1);
+
+ /*
+ * If we're nearing the end of the read, switch to
+ * "any data available" mode.
+ */
+ if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
+ writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
+
+ /*
+ * If we run out of data, disable the data IRQs; this
+ * prevents a race where the FIFO becomes empty before
+ * the chip itself has disabled the data path, and
+ * stops us racing with our data end IRQ.
+ */
+ if (host->size == 0) {
+ writel(0, base + MMCIMASK1);
+ writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Handle completion of command and data transfers.
+ */
+static irqreturn_t mmci_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct mmci_host *host = dev_id;
+ u32 status;
+ int ret = 0;
+
+ spin_lock(&host->lock);
+
+ do {
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ status = readl(host->base + MMCISTATUS);
+ status &= readl(host->base + MMCIMASK0);
+ writel(status, host->base + MMCICLEAR);
+
+ DBG(host, "irq0 %08x\n", status);
+
+ data = host->data;
+ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
+ MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
+ mmci_data_irq(host, data, status);
+
+ cmd = host->cmd;
+ if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
+ mmci_cmd_irq(host, cmd, status);
+
+ ret = 1;
+ } while (status);
+
+ spin_unlock(&host->lock);
+
+ return IRQ_RETVAL(ret);
+}
+
+static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+
+ WARN_ON(host->mrq != NULL);
+
+ spin_lock_irq(&host->lock);
+
+ host->mrq = mrq;
+
+ if (mrq->data && mrq->data->flags & MMC_DATA_READ)
+ mmci_start_data(host, mrq->data);
+
+ mmci_start_command(host, mrq->cmd, 0);
+
+ spin_unlock_irq(&host->lock);
+}
+
+static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ u32 clk = 0, pwr = 0;
+
+ DBG(host, "clock %uHz busmode %u powermode %u Vdd %u\n",
+ ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);
+
+ if (ios->clock) {
+ if (ios->clock >= host->mclk) {
+ clk = MCI_CLK_BYPASS;
+ host->cclk = host->mclk;
+ } else {
+ clk = host->mclk / (2 * ios->clock) - 1;
+			if (clk >= 256)
+ clk = 255;
+ host->cclk = host->mclk / (2 * (clk + 1));
+ }
+ clk |= MCI_CLK_ENABLE;
+ }
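+	/*
+	 * Worked example of the divider above (the MCLK figure is purely
+	 * illustrative, not taken from this patch): with a 32 MHz MCLK and
+	 * a 400 kHz request, clk = 32000000 / (2 * 400000) - 1 = 39, so
+	 * cclk = 32000000 / (2 * (39 + 1)) = 400 kHz.
+	 */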
+
+ if (host->plat->translate_vdd)
+ pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ break;
+ case MMC_POWER_UP:
+ pwr |= MCI_PWR_UP;
+ break;
+ case MMC_POWER_ON:
+ pwr |= MCI_PWR_ON;
+ break;
+ }
+
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pwr |= MCI_ROD;
+
+ writel(clk, host->base + MMCICLOCK);
+
+ if (host->pwr != pwr) {
+ host->pwr = pwr;
+ writel(pwr, host->base + MMCIPOWER);
+ }
+}
+
+static struct mmc_host_ops mmci_ops = {
+ .request = mmci_request,
+ .set_ios = mmci_set_ios,
+};
+
+static void mmci_check_status(unsigned long data)
+{
+ struct mmci_host *host = (struct mmci_host *)data;
+ unsigned int status;
+
+ status = host->plat->status(mmc_dev(host->mmc));
+ if (status ^ host->oldstat)
+ mmc_detect_change(host->mmc);
+
+ host->oldstat = status;
+ mod_timer(&host->timer, jiffies + HZ);
+}
+
+static int mmci_probe(struct amba_device *dev, void *id)
+{
+ struct mmc_platform_data *plat = dev->dev.platform_data;
+ struct mmci_host *host;
+ struct mmc_host *mmc;
+ int ret;
+
+ /* must have platform data */
+ if (!plat) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = amba_request_regions(dev, DRIVER_NAME);
+ if (ret)
+ goto out;
+
+ mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto rel_regions;
+ }
+
+ host = mmc_priv(mmc);
+ host->clk = clk_get(&dev->dev, "MCLK");
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ host->clk = NULL;
+ goto host_free;
+ }
+
+ ret = clk_use(host->clk);
+ if (ret)
+ goto clk_free;
+
+ ret = clk_enable(host->clk);
+ if (ret)
+ goto clk_unuse;
+
+ host->plat = plat;
+ host->mclk = clk_get_rate(host->clk);
+ host->mmc = mmc;
+ host->base = ioremap(dev->res.start, SZ_4K);
+ if (!host->base) {
+ ret = -ENOMEM;
+ goto clk_disable;
+ }
+
+ mmc->ops = &mmci_ops;
+ mmc->f_min = (host->mclk + 511) / 512;
+ mmc->f_max = min(host->mclk, fmax);
+ mmc->ocr_avail = plat->ocr_mask;
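+	/*
+	 * f_min above is MCLK / 512, rounded up: the clock divider field
+	 * is 8 bits wide, so the largest divisor is 2 * (255 + 1) = 512.
+	 * f_max is additionally capped by the "fmax" module parameter.
+	 */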
+
+ /*
+ * We can do SGIO
+ */
+ mmc->max_hw_segs = 16;
+ mmc->max_phys_segs = NR_SG;
+
+ /*
+ * Since we only have a 16-bit data length register, we must
+ * ensure that we don't exceed 2^16-1 bytes in a single request.
+ * Choose 64 (512-byte) sectors as the limit.
+ */
+ mmc->max_sectors = 64;
+
+ /*
+ * Set the maximum segment size. Since we aren't doing DMA
+ * (yet) we are only limited by the data length register.
+ */
+ mmc->max_seg_size = mmc->max_sectors << 9;
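+	/*
+	 * With the limits above a single request is at most 64 * 512 =
+	 * 32768 bytes, comfortably below the 65535-byte ceiling of the
+	 * 16-bit data length register.
+	 */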
+
+ spin_lock_init(&host->lock);
+
+ writel(0, host->base + MMCIMASK0);
+ writel(0, host->base + MMCIMASK1);
+ writel(0xfff, host->base + MMCICLEAR);
+
+ ret = request_irq(dev->irq[0], mmci_irq, SA_SHIRQ, DRIVER_NAME " (cmd)", host);
+ if (ret)
+ goto unmap;
+
+ ret = request_irq(dev->irq[1], mmci_pio_irq, SA_SHIRQ, DRIVER_NAME " (pio)", host);
+ if (ret)
+ goto irq0_free;
+
+ writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+
+ amba_set_drvdata(dev, mmc);
+
+ mmc_add_host(mmc);
+
+ printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%08lx irq %d,%d\n",
+ mmc->host_name, amba_rev(dev), amba_config(dev),
+ dev->res.start, dev->irq[0], dev->irq[1]);
+
+ init_timer(&host->timer);
+ host->timer.data = (unsigned long)host;
+ host->timer.function = mmci_check_status;
+ host->timer.expires = jiffies + HZ;
+ add_timer(&host->timer);
+
+ return 0;
+
+ irq0_free:
+ free_irq(dev->irq[0], host);
+ unmap:
+ iounmap(host->base);
+ clk_disable:
+ clk_disable(host->clk);
+ clk_unuse:
+ clk_unuse(host->clk);
+ clk_free:
+ clk_put(host->clk);
+ host_free:
+ mmc_free_host(mmc);
+ rel_regions:
+ amba_release_regions(dev);
+ out:
+ return ret;
+}
+
+static int mmci_remove(struct amba_device *dev)
+{
+ struct mmc_host *mmc = amba_get_drvdata(dev);
+
+ amba_set_drvdata(dev, NULL);
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+
+ del_timer_sync(&host->timer);
+
+ mmc_remove_host(mmc);
+
+ writel(0, host->base + MMCIMASK0);
+ writel(0, host->base + MMCIMASK1);
+
+ writel(0, host->base + MMCICOMMAND);
+ writel(0, host->base + MMCIDATACTRL);
+
+ free_irq(dev->irq[0], host);
+ free_irq(dev->irq[1], host);
+
+ iounmap(host->base);
+ clk_disable(host->clk);
+ clk_unuse(host->clk);
+ clk_put(host->clk);
+
+ mmc_free_host(mmc);
+
+ amba_release_regions(dev);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mmci_suspend(struct amba_device *dev, u32 state)
+{
+ struct mmc_host *mmc = amba_get_drvdata(dev);
+ int ret = 0;
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+
+ ret = mmc_suspend_host(mmc, state);
+ if (ret == 0)
+ writel(0, host->base + MMCIMASK0);
+ }
+
+ return ret;
+}
+
+static int mmci_resume(struct amba_device *dev)
+{
+ struct mmc_host *mmc = amba_get_drvdata(dev);
+ int ret = 0;
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+
+ writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+
+ ret = mmc_resume_host(mmc);
+ }
+
+ return ret;
+}
+#else
+#define mmci_suspend NULL
+#define mmci_resume NULL
+#endif
+
+static struct amba_id mmci_ids[] = {
+ {
+ .id = 0x00041180,
+ .mask = 0x000fffff,
+ },
+ {
+ .id = 0x00041181,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver mmci_driver = {
+ .drv = {
+ .name = DRIVER_NAME,
+ },
+ .probe = mmci_probe,
+ .remove = mmci_remove,
+ .suspend = mmci_suspend,
+ .resume = mmci_resume,
+ .id_table = mmci_ids,
+};
+
+static int __init mmci_init(void)
+{
+ return amba_driver_register(&mmci_driver);
+}
+
+static void __exit mmci_exit(void)
+{
+ amba_driver_unregister(&mmci_driver);
+}
+
+module_init(mmci_init);
+module_exit(mmci_exit);
+module_param(fmax, uint, 0444);
+
+MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/mmci.h b/drivers/mmc/mmci.h
new file mode 100644
index 00000000000..4589bbd6819
--- /dev/null
+++ b/drivers/mmc/mmci.h
@@ -0,0 +1,179 @@
+/*
+ * linux/drivers/mmc/mmci.h - ARM PrimeCell MMCI PL180/1 driver
+ *
+ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define MMCIPOWER 0x000
+#define MCI_PWR_OFF 0x00
+#define MCI_PWR_UP 0x02
+#define MCI_PWR_ON 0x03
+#define MCI_OD (1 << 6)
+#define MCI_ROD (1 << 7)
+
+#define MMCICLOCK 0x004
+#define MCI_CLK_ENABLE (1 << 8)
+#define MCI_CLK_PWRSAVE (1 << 9)
+#define MCI_CLK_BYPASS (1 << 10)
+
+#define MMCIARGUMENT 0x008
+#define MMCICOMMAND 0x00c
+#define MCI_CPSM_RESPONSE (1 << 6)
+#define MCI_CPSM_LONGRSP (1 << 7)
+#define MCI_CPSM_INTERRUPT (1 << 8)
+#define MCI_CPSM_PENDING (1 << 9)
+#define MCI_CPSM_ENABLE (1 << 10)
+
+#define MMCIRESPCMD 0x010
+#define MMCIRESPONSE0 0x014
+#define MMCIRESPONSE1 0x018
+#define MMCIRESPONSE2 0x01c
+#define MMCIRESPONSE3 0x020
+#define MMCIDATATIMER 0x024
+#define MMCIDATALENGTH 0x028
+#define MMCIDATACTRL 0x02c
+#define MCI_DPSM_ENABLE (1 << 0)
+#define MCI_DPSM_DIRECTION (1 << 1)
+#define MCI_DPSM_MODE (1 << 2)
+#define MCI_DPSM_DMAENABLE (1 << 3)
+
+#define MMCIDATACNT 0x030
+#define MMCISTATUS 0x034
+#define MCI_CMDCRCFAIL (1 << 0)
+#define MCI_DATACRCFAIL (1 << 1)
+#define MCI_CMDTIMEOUT (1 << 2)
+#define MCI_DATATIMEOUT (1 << 3)
+#define MCI_TXUNDERRUN (1 << 4)
+#define MCI_RXOVERRUN (1 << 5)
+#define MCI_CMDRESPEND (1 << 6)
+#define MCI_CMDSENT (1 << 7)
+#define MCI_DATAEND (1 << 8)
+#define MCI_DATABLOCKEND (1 << 10)
+#define MCI_CMDACTIVE (1 << 11)
+#define MCI_TXACTIVE (1 << 12)
+#define MCI_RXACTIVE (1 << 13)
+#define MCI_TXFIFOHALFEMPTY (1 << 14)
+#define MCI_RXFIFOHALFFULL (1 << 15)
+#define MCI_TXFIFOFULL (1 << 16)
+#define MCI_RXFIFOFULL (1 << 17)
+#define MCI_TXFIFOEMPTY (1 << 18)
+#define MCI_RXFIFOEMPTY (1 << 19)
+#define MCI_TXDATAAVLBL (1 << 20)
+#define MCI_RXDATAAVLBL (1 << 21)
+
+#define MMCICLEAR 0x038
+#define MCI_CMDCRCFAILCLR (1 << 0)
+#define MCI_DATACRCFAILCLR (1 << 1)
+#define MCI_CMDTIMEOUTCLR (1 << 2)
+#define MCI_DATATIMEOUTCLR (1 << 3)
+#define MCI_TXUNDERRUNCLR (1 << 4)
+#define MCI_RXOVERRUNCLR (1 << 5)
+#define MCI_CMDRESPENDCLR (1 << 6)
+#define MCI_CMDSENTCLR (1 << 7)
+#define MCI_DATAENDCLR (1 << 8)
+#define MCI_DATABLOCKENDCLR (1 << 10)
+
+#define MMCIMASK0 0x03c
+#define MCI_CMDCRCFAILMASK (1 << 0)
+#define MCI_DATACRCFAILMASK (1 << 1)
+#define MCI_CMDTIMEOUTMASK (1 << 2)
+#define MCI_DATATIMEOUTMASK (1 << 3)
+#define MCI_TXUNDERRUNMASK (1 << 4)
+#define MCI_RXOVERRUNMASK (1 << 5)
+#define MCI_CMDRESPENDMASK (1 << 6)
+#define MCI_CMDSENTMASK (1 << 7)
+#define MCI_DATAENDMASK (1 << 8)
+#define MCI_DATABLOCKENDMASK (1 << 10)
+#define MCI_CMDACTIVEMASK (1 << 11)
+#define MCI_TXACTIVEMASK (1 << 12)
+#define MCI_RXACTIVEMASK (1 << 13)
+#define MCI_TXFIFOHALFEMPTYMASK (1 << 14)
+#define MCI_RXFIFOHALFFULLMASK (1 << 15)
+#define MCI_TXFIFOFULLMASK (1 << 16)
+#define MCI_RXFIFOFULLMASK (1 << 17)
+#define MCI_TXFIFOEMPTYMASK (1 << 18)
+#define MCI_RXFIFOEMPTYMASK (1 << 19)
+#define MCI_TXDATAAVLBLMASK (1 << 20)
+#define MCI_RXDATAAVLBLMASK (1 << 21)
+
+#define MMCIMASK1 0x040
+#define MMCIFIFOCNT 0x048
+#define MMCIFIFO 0x080 /* to 0x0bc */
+
+#define MCI_IRQENABLE \
+ (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
+ MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
+ MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
+
+/*
+ * The size of the FIFO in bytes.
+ */
+#define MCI_FIFOSIZE (16*4)
+
+#define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2)
+
+#define NR_SG 16
+
+struct clk;
+
+struct mmci_host {
+ void __iomem *base;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct mmc_host *mmc;
+ struct clk *clk;
+
+ unsigned int data_xfered;
+
+ spinlock_t lock;
+
+ unsigned int mclk;
+ unsigned int cclk;
+ u32 pwr;
+ struct mmc_platform_data *plat;
+
+ struct timer_list timer;
+ unsigned int oldstat;
+
+ unsigned int sg_len;
+
+ /* pio stuff */
+ struct scatterlist *sg_ptr;
+ unsigned int sg_off;
+ unsigned int size;
+};
+
+static inline void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
+{
+ /*
+ * Ideally, we want the higher levels to pass us a scatter list.
+ */
+ host->sg_len = data->sg_len;
+ host->sg_ptr = data->sg;
+ host->sg_off = 0;
+}
+
+static inline int mmci_next_sg(struct mmci_host *host)
+{
+ host->sg_ptr++;
+ host->sg_off = 0;
+ return --host->sg_len;
+}
+
+static inline char *mmci_kmap_atomic(struct mmci_host *host, unsigned long *flags)
+{
+ struct scatterlist *sg = host->sg_ptr;
+
+ local_irq_save(*flags);
+ return kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
+}
+
+static inline void mmci_kunmap_atomic(struct mmci_host *host, unsigned long *flags)
+{
+ kunmap_atomic(host->sg_ptr->page, KM_BIO_SRC_IRQ);
+ local_irq_restore(*flags);
+}
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
new file mode 100644
index 00000000000..f76deedf535
--- /dev/null
+++ b/drivers/mmc/pxamci.c
@@ -0,0 +1,610 @@
+/*
+ * linux/drivers/mmc/pxamci.c - PXA MMCI driver
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This hardware is really sick:
+ * - No way to clear interrupts.
+ * - Have to turn off the clock whenever we touch the device.
+ * - Doesn't tell you how many data blocks were transferred.
+ * Yuck!
+ *
+ * 1 and 3 byte data transfers not supported
+ * max block length up to 1023
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/protocol.h>
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/scatterlist.h>
+#include <asm/sizes.h>
+
+#include <asm/arch/pxa-regs.h>
+#include <asm/arch/mmc.h>
+
+#include "pxamci.h"
+
+#ifdef CONFIG_MMC_DEBUG
+#define DBG(x...) printk(KERN_DEBUG x)
+#else
+#define DBG(x...) do { } while (0)
+#endif
+
+#define DRIVER_NAME "pxa2xx-mci"
+
+#define NR_SG 1
+
+struct pxamci_host {
+ struct mmc_host *mmc;
+ spinlock_t lock;
+ struct resource *res;
+ void __iomem *base;
+ int irq;
+ int dma;
+ unsigned int clkrt;
+ unsigned int cmdat;
+ unsigned int imask;
+ unsigned int power_mode;
+ struct pxamci_platform_data *pdata;
+
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ dma_addr_t sg_dma;
+ struct pxa_dma_desc *sg_cpu;
+ unsigned int dma_len;
+
+ unsigned int dma_dir;
+};
+
+static inline unsigned int ns_to_clocks(unsigned int ns)
+{
+ return (ns * (CLOCKRATE / 1000000) + 999) / 1000;
+}
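+/*
+ * ns_to_clocks() converts a nanosecond timeout into MMC clock ticks at
+ * CLOCKRATE, rounding up.  For example, at the 20 MHz PXA2xx CLOCKRATE
+ * (20 ticks per microsecond) a 100000 ns timeout becomes
+ * (100000 * 20 + 999) / 1000 = 2000 clocks.
+ */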
+
+static void pxamci_stop_clock(struct pxamci_host *host)
+{
+ if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
+ unsigned long timeout = 10000;
+ unsigned int v;
+
+ writel(STOP_CLOCK, host->base + MMC_STRPCL);
+
+ do {
+ v = readl(host->base + MMC_STAT);
+ if (!(v & STAT_CLK_EN))
+ break;
+ udelay(1);
+ } while (timeout--);
+
+ if (v & STAT_CLK_EN)
+ dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
+ }
+}
+
+static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->imask &= ~mask;
+ writel(host->imask, host->base + MMC_I_MASK);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->imask |= mask;
+ writel(host->imask, host->base + MMC_I_MASK);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
+{
+ unsigned int nob = data->blocks;
+ unsigned int timeout;
+ u32 dcmd;
+ int i;
+
+ host->data = data;
+
+ if (data->flags & MMC_DATA_STREAM)
+ nob = 0xffff;
+
+ writel(nob, host->base + MMC_NOB);
+ writel(1 << data->blksz_bits, host->base + MMC_BLKLEN);
+
+ timeout = ns_to_clocks(data->timeout_ns) + data->timeout_clks;
+ writel((timeout + 255) / 256, host->base + MMC_RDTO);
+
+ if (data->flags & MMC_DATA_READ) {
+ host->dma_dir = DMA_FROM_DEVICE;
+ dcmd = DCMD_INCTRGADDR | DCMD_FLOWTRG;
+ DRCMRTXMMC = 0;
+ DRCMRRXMMC = host->dma | DRCMR_MAPVLD;
+ } else {
+ host->dma_dir = DMA_TO_DEVICE;
+ dcmd = DCMD_INCSRCADDR | DCMD_FLOWSRC;
+ DRCMRRXMMC = 0;
+ DRCMRTXMMC = host->dma | DRCMR_MAPVLD;
+ }
+
+ dcmd |= DCMD_BURST32 | DCMD_WIDTH1;
+
+ host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ host->dma_dir);
+
+ for (i = 0; i < host->dma_len; i++) {
+ if (data->flags & MMC_DATA_READ) {
+ host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
+ host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
+ } else {
+ host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
+ host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
+ }
+ host->sg_cpu[i].dcmd = dcmd | sg_dma_len(&data->sg[i]);
+ host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
+ sizeof(struct pxa_dma_desc);
+ }
+ host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
+ wmb();
+
+ DDADR(host->dma) = host->sg_dma;
+ DCSR(host->dma) = DCSR_RUN;
+}
+
+static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
+{
+ WARN_ON(host->cmd != NULL);
+ host->cmd = cmd;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ cmdat |= CMDAT_BUSY;
+
+ switch (cmd->flags & (MMC_RSP_MASK | MMC_RSP_CRC)) {
+ case MMC_RSP_SHORT | MMC_RSP_CRC:
+ cmdat |= CMDAT_RESP_SHORT;
+ break;
+ case MMC_RSP_SHORT:
+ cmdat |= CMDAT_RESP_R3;
+ break;
+ case MMC_RSP_LONG | MMC_RSP_CRC:
+ cmdat |= CMDAT_RESP_R2;
+ break;
+ default:
+ break;
+ }
+
+ writel(cmd->opcode, host->base + MMC_CMD);
+ writel(cmd->arg >> 16, host->base + MMC_ARGH);
+ writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
+ writel(cmdat, host->base + MMC_CMDAT);
+ writel(host->clkrt, host->base + MMC_CLKRT);
+
+ writel(START_CLOCK, host->base + MMC_STRPCL);
+
+ pxamci_enable_irq(host, END_CMD_RES);
+}
+
+static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
+{
+ DBG("PXAMCI: request done\n");
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
+static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
+{
+ struct mmc_command *cmd = host->cmd;
+ int i;
+ u32 v;
+
+ if (!cmd)
+ return 0;
+
+ host->cmd = NULL;
+
+ /*
+	 * Did I mention this hardware is sick?  We always need to
+ * discard the upper 8 bits of the first 16-bit word.
+ */
+ v = readl(host->base + MMC_RES) & 0xffff;
+ for (i = 0; i < 4; i++) {
+ u32 w1 = readl(host->base + MMC_RES) & 0xffff;
+ u32 w2 = readl(host->base + MMC_RES) & 0xffff;
+ cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
+ v = w2;
+ }
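+	/*
+	 * Each resp[] word is built from the low byte carried over in v
+	 * plus the next two 16-bit reads, so the 8 junk bits at the top
+	 * of the first read never reach the response.
+	 */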
+
+ if (stat & STAT_TIME_OUT_RESPONSE) {
+ cmd->error = MMC_ERR_TIMEOUT;
+ } else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
+#ifdef CONFIG_PXA27x
+ /*
+ * workaround for erratum #42:
+ * Intel PXA27x Family Processor Specification Update Rev 001
+ */
+ if (cmd->opcode == MMC_ALL_SEND_CID ||
+ cmd->opcode == MMC_SEND_CSD ||
+ cmd->opcode == MMC_SEND_CID) {
+ /* a bogus CRC error can appear if the msb of
+ the 15 byte response is a one */
+ if ((cmd->resp[0] & 0x80000000) == 0)
+ cmd->error = MMC_ERR_BADCRC;
+ } else {
+ DBG("ignoring CRC from command %d - *risky*\n",cmd->opcode);
+ }
+#else
+ cmd->error = MMC_ERR_BADCRC;
+#endif
+ }
+
+ pxamci_disable_irq(host, END_CMD_RES);
+ if (host->data && cmd->error == MMC_ERR_NONE) {
+ pxamci_enable_irq(host, DATA_TRAN_DONE);
+ } else {
+ pxamci_finish_request(host, host->mrq);
+ }
+
+ return 1;
+}
+
+static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
+{
+ struct mmc_data *data = host->data;
+
+ if (!data)
+ return 0;
+
+ DCSR(host->dma) = 0;
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
+ host->dma_dir);
+
+ if (stat & STAT_READ_TIME_OUT)
+ data->error = MMC_ERR_TIMEOUT;
+ else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
+ data->error = MMC_ERR_BADCRC;
+
+ /*
+ * There appears to be a hardware design bug here. There seems to
+ * be no way to find out how much data was transferred to the card.
+ * This means that if there was an error on any block, we mark all
+ * data blocks as being in error.
+ */
+ if (data->error == MMC_ERR_NONE)
+ data->bytes_xfered = data->blocks << data->blksz_bits;
+ else
+ data->bytes_xfered = 0;
+
+ pxamci_disable_irq(host, DATA_TRAN_DONE);
+
+ host->data = NULL;
+ if (host->mrq->stop && data->error == MMC_ERR_NONE) {
+ pxamci_stop_clock(host);
+ pxamci_start_cmd(host, host->mrq->stop, 0);
+ } else {
+ pxamci_finish_request(host, host->mrq);
+ }
+
+ return 1;
+}
+
+static irqreturn_t pxamci_irq(int irq, void *devid, struct pt_regs *regs)
+{
+ struct pxamci_host *host = devid;
+ unsigned int ireg;
+ int handled = 0;
+
+ ireg = readl(host->base + MMC_I_REG);
+
+ DBG("PXAMCI: irq %08x\n", ireg);
+
+ if (ireg) {
+ unsigned stat = readl(host->base + MMC_STAT);
+
+ DBG("PXAMCI: stat %08x\n", stat);
+
+ if (ireg & END_CMD_RES)
+ handled |= pxamci_cmd_done(host, stat);
+ if (ireg & DATA_TRAN_DONE)
+ handled |= pxamci_data_done(host, stat);
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct pxamci_host *host = mmc_priv(mmc);
+ unsigned int cmdat;
+
+ WARN_ON(host->mrq != NULL);
+
+ host->mrq = mrq;
+
+ pxamci_stop_clock(host);
+
+ cmdat = host->cmdat;
+ host->cmdat &= ~CMDAT_INIT;
+
+ if (mrq->data) {
+ pxamci_setup_data(host, mrq->data);
+
+ cmdat &= ~CMDAT_BUSY;
+ cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
+ if (mrq->data->flags & MMC_DATA_WRITE)
+ cmdat |= CMDAT_WRITE;
+
+ if (mrq->data->flags & MMC_DATA_STREAM)
+ cmdat |= CMDAT_STREAM;
+ }
+
+ pxamci_start_cmd(host, mrq->cmd, cmdat);
+}
+
+static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct pxamci_host *host = mmc_priv(mmc);
+
+ DBG("pxamci_set_ios: clock %u power %u vdd %u.%02u\n",
+ ios->clock, ios->power_mode, ios->vdd / 100,
+ ios->vdd % 100);
+
+ if (ios->clock) {
+ unsigned int clk = CLOCKRATE / ios->clock;
+ if (CLOCKRATE / clk > ios->clock)
+ clk <<= 1;
+ host->clkrt = fls(clk) - 1;
+ pxa_set_cken(CKEN12_MMC, 1);
+
+ /*
+ * we write clkrt on the next command
+ */
+ } else {
+ pxamci_stop_clock(host);
+ pxa_set_cken(CKEN12_MMC, 0);
+ }
+
+ if (host->power_mode != ios->power_mode) {
+ host->power_mode = ios->power_mode;
+
+ if (host->pdata && host->pdata->setpower)
+ host->pdata->setpower(mmc->dev, ios->vdd);
+
+ if (ios->power_mode == MMC_POWER_ON)
+ host->cmdat |= CMDAT_INIT;
+ }
+
+ DBG("pxamci_set_ios: clkrt = %x cmdat = %x\n",
+ host->clkrt, host->cmdat);
+}
+
+static struct mmc_host_ops pxamci_ops = {
+ .request = pxamci_request,
+ .set_ios = pxamci_set_ios,
+};
+
+static void pxamci_dma_irq(int dma, void *devid, struct pt_regs *regs)
+{
+ printk(KERN_ERR "DMA%d: IRQ???\n", dma);
+ DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
+}
+
+static irqreturn_t pxamci_detect_irq(int irq, void *devid, struct pt_regs *regs)
+{
+ mmc_detect_change(devid);
+ return IRQ_HANDLED;
+}
+
+static int pxamci_probe(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mmc_host *mmc;
+ struct pxamci_host *host = NULL;
+ struct resource *r;
+ int ret, irq;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!r || irq == NO_IRQ)
+ return -ENXIO;
+
+ r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
+ if (!r)
+ return -EBUSY;
+
+ mmc = mmc_alloc_host(sizeof(struct pxamci_host), dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mmc->ops = &pxamci_ops;
+ mmc->f_min = CLOCKRATE_MIN;
+ mmc->f_max = CLOCKRATE_MAX;
+
+ /*
+ * We can do SG-DMA, but we don't because we never know how much
+ * data we successfully wrote to the card.
+ */
+ mmc->max_phys_segs = NR_SG;
+
+ /*
+ * Our hardware DMA can handle a maximum of one page per SG entry.
+ */
+ mmc->max_seg_size = PAGE_SIZE;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->dma = -1;
+ host->pdata = pdev->dev.platform_data;
+ mmc->ocr_avail = host->pdata ?
+ host->pdata->ocr_mask :
+ MMC_VDD_32_33|MMC_VDD_33_34;
+
+ host->sg_cpu = dma_alloc_coherent(dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
+ if (!host->sg_cpu) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock_init(&host->lock);
+ host->res = r;
+ host->irq = irq;
+ host->imask = MMC_I_MASK_ALL;
+
+ host->base = ioremap(r->start, SZ_4K);
+ if (!host->base) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+	 * Ensure that the host controller is shut down, and set up
+ * with our defaults.
+ */
+ pxamci_stop_clock(host);
+ writel(0, host->base + MMC_SPI);
+ writel(64, host->base + MMC_RESTO);
+ writel(host->imask, host->base + MMC_I_MASK);
+
+ host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
+ pxamci_dma_irq, host);
+ if (host->dma < 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
+ if (ret)
+ goto out;
+
+ dev_set_drvdata(dev, mmc);
+
+ if (host->pdata && host->pdata->init)
+ host->pdata->init(dev, pxamci_detect_irq, mmc);
+
+ mmc_add_host(mmc);
+
+ return 0;
+
+ out:
+ if (host) {
+ if (host->dma >= 0)
+ pxa_free_dma(host->dma);
+ if (host->base)
+ iounmap(host->base);
+ if (host->sg_cpu)
+ dma_free_coherent(dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+ }
+ if (mmc)
+ mmc_free_host(mmc);
+ release_resource(r);
+ return ret;
+}
+
+static int pxamci_remove(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+
+ dev_set_drvdata(dev, NULL);
+
+ if (mmc) {
+ struct pxamci_host *host = mmc_priv(mmc);
+
+ if (host->pdata && host->pdata->exit)
+ host->pdata->exit(dev, mmc);
+
+ mmc_remove_host(mmc);
+
+ pxamci_stop_clock(host);
+ writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
+ END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
+ host->base + MMC_I_MASK);
+
+ DRCMRRXMMC = 0;
+ DRCMRTXMMC = 0;
+
+ free_irq(host->irq, host);
+ pxa_free_dma(host->dma);
+ iounmap(host->base);
+ dma_free_coherent(dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+
+ release_resource(host->res);
+
+ mmc_free_host(mmc);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int pxamci_suspend(struct device *dev, u32 state, u32 level)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (mmc && level == SUSPEND_DISABLE)
+ ret = mmc_suspend_host(mmc, state);
+
+ return ret;
+}
+
+static int pxamci_resume(struct device *dev, u32 level)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (mmc && level == RESUME_ENABLE)
+ ret = mmc_resume_host(mmc);
+
+ return ret;
+}
+#else
+#define pxamci_suspend NULL
+#define pxamci_resume NULL
+#endif
+
+static struct device_driver pxamci_driver = {
+ .name = DRIVER_NAME,
+ .bus = &platform_bus_type,
+ .probe = pxamci_probe,
+ .remove = pxamci_remove,
+ .suspend = pxamci_suspend,
+ .resume = pxamci_resume,
+};
+
+static int __init pxamci_init(void)
+{
+ return driver_register(&pxamci_driver);
+}
+
+static void __exit pxamci_exit(void)
+{
+ driver_unregister(&pxamci_driver);
+}
+
+module_init(pxamci_init);
+module_exit(pxamci_exit);
+
+MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/pxamci.h b/drivers/mmc/pxamci.h
new file mode 100644
index 00000000000..1b163220df2
--- /dev/null
+++ b/drivers/mmc/pxamci.h
@@ -0,0 +1,124 @@
+#undef MMC_STRPCL
+#undef MMC_STAT
+#undef MMC_CLKRT
+#undef MMC_SPI
+#undef MMC_CMDAT
+#undef MMC_RESTO
+#undef MMC_RDTO
+#undef MMC_BLKLEN
+#undef MMC_NOB
+#undef MMC_PRTBUF
+#undef MMC_I_MASK
+#undef END_CMD_RES
+#undef PRG_DONE
+#undef DATA_TRAN_DONE
+#undef MMC_I_REG
+#undef MMC_CMD
+#undef MMC_ARGH
+#undef MMC_ARGL
+#undef MMC_RES
+#undef MMC_RXFIFO
+#undef MMC_TXFIFO
+
+#define MMC_STRPCL 0x0000
+#define STOP_CLOCK (1 << 0)
+#define START_CLOCK (2 << 0)
+
+#define MMC_STAT 0x0004
+#define STAT_END_CMD_RES (1 << 13)
+#define STAT_PRG_DONE (1 << 12)
+#define STAT_DATA_TRAN_DONE (1 << 11)
+#define STAT_CLK_EN (1 << 8)
+#define STAT_RECV_FIFO_FULL (1 << 7)
+#define STAT_XMIT_FIFO_EMPTY (1 << 6)
+#define STAT_RES_CRC_ERR (1 << 5)
+#define STAT_SPI_READ_ERROR_TOKEN (1 << 4)
+#define STAT_CRC_READ_ERROR (1 << 3)
+#define STAT_CRC_WRITE_ERROR (1 << 2)
+#define STAT_TIME_OUT_RESPONSE (1 << 1)
+#define STAT_READ_TIME_OUT (1 << 0)
+
+#define MMC_CLKRT 0x0008 /* 3 bit */
+
+#define MMC_SPI 0x000c
+#define SPI_CS_ADDRESS (1 << 3)
+#define SPI_CS_EN (1 << 2)
+#define CRC_ON (1 << 1)
+#define SPI_EN (1 << 0)
+
+#define MMC_CMDAT 0x0010
+#define CMDAT_DMAEN (1 << 7)
+#define CMDAT_INIT (1 << 6)
+#define CMDAT_BUSY (1 << 5)
+#define CMDAT_STREAM (1 << 4) /* 1 = stream */
+#define CMDAT_WRITE (1 << 3) /* 1 = write */
+#define CMDAT_DATAEN (1 << 2)
+#define CMDAT_RESP_NONE (0 << 0)
+#define CMDAT_RESP_SHORT (1 << 0)
+#define CMDAT_RESP_R2 (2 << 0)
+#define CMDAT_RESP_R3 (3 << 0)
+
+#define MMC_RESTO 0x0014 /* 7 bit */
+
+#define MMC_RDTO 0x0018 /* 16 bit */
+
+#define MMC_BLKLEN 0x001c /* 10 bit */
+
+#define MMC_NOB 0x0020 /* 16 bit */
+
+#define MMC_PRTBUF 0x0024
+#define BUF_PART_FULL (1 << 0)
+
+#define MMC_I_MASK 0x0028
+
+/*PXA27x MMC interrupts*/
+#define SDIO_SUSPEND_ACK (1 << 12)
+#define SDIO_INT (1 << 11)
+#define RD_STALLED (1 << 10)
+#define RES_ERR (1 << 9)
+#define DAT_ERR (1 << 8)
+#define TINT (1 << 7)
+
+/*PXA2xx MMC interrupts*/
+#define TXFIFO_WR_REQ (1 << 6)
+#define RXFIFO_RD_REQ (1 << 5)
+#define CLK_IS_OFF (1 << 4)
+#define STOP_CMD (1 << 3)
+#define END_CMD_RES (1 << 2)
+#define PRG_DONE (1 << 1)
+#define DATA_TRAN_DONE (1 << 0)
+
+#ifdef CONFIG_PXA27x
+#define MMC_I_MASK_ALL 0x00001fff
+#else
+#define MMC_I_MASK_ALL 0x0000007f
+#endif
+
+#define MMC_I_REG 0x002c
+/* same as MMC_I_MASK */
+
+#define MMC_CMD 0x0030
+
+#define MMC_ARGH 0x0034 /* 16 bit */
+
+#define MMC_ARGL 0x0038 /* 16 bit */
+
+#define MMC_RES 0x003c /* 16 bit */
+
+#define MMC_RXFIFO 0x0040 /* 8 bit */
+
+#define MMC_TXFIFO 0x0044 /* 8 bit */
+
+/*
+ * The base MMC clock rate
+ */
+#ifdef CONFIG_PXA27x
+#define CLOCKRATE_MIN 304688
+#define CLOCKRATE_MAX 19500000
+#else
+#define CLOCKRATE_MIN 312500
+#define CLOCKRATE_MAX 20000000
+#endif
+
+#define CLOCKRATE CLOCKRATE_MAX
+
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
new file mode 100644
index 00000000000..938bca0414e
--- /dev/null
+++ b/drivers/mmc/wbsd.c
@@ -0,0 +1,1651 @@
+/*
+ * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
+ *
+ * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * Warning!
+ *
+ * Changes to the FIFO system should be done with extreme care since
+ * the hardware is full of bugs related to the FIFO. Known issues are:
+ *
+ * - FIFO size field in FSR is always zero.
+ *
+ * - FIFO interrupts tend not to work as they should. Interrupts are
+ * triggered only for full/empty events, not for threshold values.
+ *
+ * - On APIC systems the FIFO empty interrupt is sometimes lost.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/protocol.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/scatterlist.h>
+
+#include "wbsd.h"
+
+#define DRIVER_NAME "wbsd"
+#define DRIVER_VERSION "1.1"
+
+#ifdef CONFIG_MMC_DEBUG
+#define DBG(x...) \
+ printk(KERN_DEBUG DRIVER_NAME ": " x)
+#define DBGF(f, x...) \
+ printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__ , ##x)
+#else
+#define DBG(x...) do { } while (0)
+#define DBGF(x...) do { } while (0)
+#endif
+
+static unsigned int io = 0x248;
+static unsigned int irq = 6;
+static int dma = 2;
+
+#ifdef CONFIG_MMC_DEBUG
+void DBG_REG(int reg, u8 value)
+{
+ int i;
+
+ printk(KERN_DEBUG "wbsd: Register %d: 0x%02X %3d '%c' ",
+ reg, (int)value, (int)value, (value < 0x20)?'.':value);
+
+ for (i = 7;i >= 0;i--)
+ {
+ if (value & (1 << i))
+ printk("x");
+ else
+ printk(".");
+ }
+
+ printk("\n");
+}
+#else
+#define DBG_REG(r, v) do {} while (0)
+#endif
+
+/*
+ * Basic functions
+ */
+
+static inline void wbsd_unlock_config(struct wbsd_host* host)
+{
+ outb(host->unlock_code, host->config);
+ outb(host->unlock_code, host->config);
+}
+
+static inline void wbsd_lock_config(struct wbsd_host* host)
+{
+ outb(LOCK_CODE, host->config);
+}
+
+static inline void wbsd_write_config(struct wbsd_host* host, u8 reg, u8 value)
+{
+ outb(reg, host->config);
+ outb(value, host->config + 1);
+}
+
+static inline u8 wbsd_read_config(struct wbsd_host* host, u8 reg)
+{
+ outb(reg, host->config);
+ return inb(host->config + 1);
+}
+
+static inline void wbsd_write_index(struct wbsd_host* host, u8 index, u8 value)
+{
+ outb(index, host->base + WBSD_IDXR);
+ outb(value, host->base + WBSD_DATAR);
+}
+
+static inline u8 wbsd_read_index(struct wbsd_host* host, u8 index)
+{
+ outb(index, host->base + WBSD_IDXR);
+ return inb(host->base + WBSD_DATAR);
+}
+
+/*
+ * Common routines
+ */
+
+static void wbsd_init_device(struct wbsd_host* host)
+{
+ u8 setup, ier;
+
+ /*
+ * Reset chip (SD/MMC part) and fifo.
+ */
+ setup = wbsd_read_index(host, WBSD_IDX_SETUP);
+ setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
+ wbsd_write_index(host, WBSD_IDX_SETUP, setup);
+
+ /*
+ * Read back default clock.
+ */
+ host->clk = wbsd_read_index(host, WBSD_IDX_CLK);
+
+ /*
+ * Power down port.
+ */
+ outb(WBSD_POWER_N, host->base + WBSD_CSR);
+
+ /*
+ * Set maximum timeout.
+ */
+ wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);
+
+ /*
+ * Enable interesting interrupts.
+ */
+ ier = 0;
+ ier |= WBSD_EINT_CARD;
+ ier |= WBSD_EINT_FIFO_THRE;
+ ier |= WBSD_EINT_CCRC;
+ ier |= WBSD_EINT_TIMEOUT;
+ ier |= WBSD_EINT_CRC;
+ ier |= WBSD_EINT_TC;
+
+ outb(ier, host->base + WBSD_EIR);
+
+ /*
+ * Clear interrupts.
+ */
+ inb(host->base + WBSD_ISR);
+}
+
+static void wbsd_reset(struct wbsd_host* host)
+{
+ u8 setup;
+
+ printk(KERN_ERR DRIVER_NAME ": Resetting chip\n");
+
+ /*
+ * Soft reset of chip (SD/MMC part).
+ */
+ setup = wbsd_read_index(host, WBSD_IDX_SETUP);
+ setup |= WBSD_SOFT_RESET;
+ wbsd_write_index(host, WBSD_IDX_SETUP, setup);
+}
+
+static void wbsd_request_end(struct wbsd_host* host, struct mmc_request* mrq)
+{
+ unsigned long dmaflags;
+
+ DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);
+
+ if (host->dma >= 0)
+ {
+ /*
+ * Release ISA DMA controller.
+ */
+ dmaflags = claim_dma_lock();
+ disable_dma(host->dma);
+ clear_dma_ff(host->dma);
+ release_dma_lock(dmaflags);
+
+ /*
+ * Disable DMA on host.
+ */
+ wbsd_write_index(host, WBSD_IDX_DMA, 0);
+ }
+
+ host->mrq = NULL;
+
+ /*
+	 * The MMC layer might call back into the driver, so unlock first.
+ */
+ spin_unlock(&host->lock);
+ mmc_request_done(host->mmc, mrq);
+ spin_lock(&host->lock);
+}
+
+/*
+ * Scatter/gather functions
+ */
+
+static inline void wbsd_init_sg(struct wbsd_host* host, struct mmc_data* data)
+{
+ /*
+ * Get info. about SG list from data structure.
+ */
+ host->cur_sg = data->sg;
+ host->num_sg = data->sg_len;
+
+ host->offset = 0;
+ host->remain = host->cur_sg->length;
+}
+
+static inline int wbsd_next_sg(struct wbsd_host* host)
+{
+ /*
+ * Skip to next SG entry.
+ */
+ host->cur_sg++;
+ host->num_sg--;
+
+ /*
+ * Any entries left?
+ */
+ if (host->num_sg > 0)
+ {
+ host->offset = 0;
+ host->remain = host->cur_sg->length;
+ }
+
+ return host->num_sg;
+}
+
+static inline char* wbsd_kmap_sg(struct wbsd_host* host)
+{
+ host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) +
+ host->cur_sg->offset;
+ return host->mapped_sg;
+}
+
+static inline void wbsd_kunmap_sg(struct wbsd_host* host)
+{
+ kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
+}
+
+static inline void wbsd_sg_to_dma(struct wbsd_host* host, struct mmc_data* data)
+{
+ unsigned int len, i, size;
+ struct scatterlist* sg;
+ char* dmabuf = host->dma_buffer;
+ char* sgbuf;
+
+ size = host->size;
+
+ sg = data->sg;
+ len = data->sg_len;
+
+ /*
+ * Just loop through all entries. Size might not
+ * be the entire list though so make sure that
+ * we do not transfer too much.
+ */
+ for (i = 0;i < len;i++)
+ {
+ sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
+ if (size < sg[i].length)
+ memcpy(dmabuf, sgbuf, size);
+ else
+ memcpy(dmabuf, sgbuf, sg[i].length);
+ kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
+ dmabuf += sg[i].length;
+
+ if (size < sg[i].length)
+ size = 0;
+ else
+ size -= sg[i].length;
+
+ if (size == 0)
+ break;
+ }
+
+ /*
+ * Check that we didn't get a request to transfer
+ * more data than can fit into the SG list.
+ */
+
+ BUG_ON(size != 0);
+
+ host->size -= size;
+}
+
+static inline void wbsd_dma_to_sg(struct wbsd_host* host, struct mmc_data* data)
+{
+ unsigned int len, i, size;
+ struct scatterlist* sg;
+ char* dmabuf = host->dma_buffer;
+ char* sgbuf;
+
+ size = host->size;
+
+ sg = data->sg;
+ len = data->sg_len;
+
+ /*
+ * Just loop through all entries. Size might not
+ * be the entire list though so make sure that
+ * we do not transfer too much.
+ */
+ for (i = 0;i < len;i++)
+ {
+ sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
+ if (size < sg[i].length)
+ memcpy(sgbuf, dmabuf, size);
+ else
+ memcpy(sgbuf, dmabuf, sg[i].length);
+ kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
+ dmabuf += sg[i].length;
+
+ if (size < sg[i].length)
+ size = 0;
+ else
+ size -= sg[i].length;
+
+ if (size == 0)
+ break;
+ }
+
+ /*
+ * Check that we didn't get a request to transfer
+ * more data than can fit into the SG list.
+ */
+
+ BUG_ON(size != 0);
+
+ host->size -= size;
+}
+
+/*
+ * Command handling
+ */
+
+static inline void wbsd_get_short_reply(struct wbsd_host* host,
+ struct mmc_command* cmd)
+{
+ /*
+ * Correct response type?
+ */
+ if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT)
+ {
+ cmd->error = MMC_ERR_INVALID;
+ return;
+ }
+
+ cmd->resp[0] =
+ wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
+ cmd->resp[0] |=
+ wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
+ cmd->resp[0] |=
+ wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
+ cmd->resp[0] |=
+ wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
+ cmd->resp[1] =
+ wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
+}
+
+static inline void wbsd_get_long_reply(struct wbsd_host* host,
+ struct mmc_command* cmd)
+{
+ int i;
+
+ /*
+ * Correct response type?
+ */
+ if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG)
+ {
+ cmd->error = MMC_ERR_INVALID;
+ return;
+ }
+
+ for (i = 0;i < 4;i++)
+ {
+ cmd->resp[i] =
+ wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
+ cmd->resp[i] |=
+ wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
+ cmd->resp[i] |=
+ wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
+ cmd->resp[i] |=
+ wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
+ }
+}
+
+static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs);
+
+static void wbsd_send_command(struct wbsd_host* host, struct mmc_command* cmd)
+{
+ int i;
+ u8 status, isr;
+
+ DBGF("Sending cmd (%x)\n", cmd->opcode);
+
+ /*
+ * Clear accumulated ISR. The interrupt routine
+ * will fill this one with events that occur during
+ * transfer.
+ */
+ host->isr = 0;
+
+ /*
+ * Send the command (CRC calculated by host).
+ */
+ outb(cmd->opcode, host->base + WBSD_CMDR);
+ for (i = 3;i >= 0;i--)
+ outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);
+
+ cmd->error = MMC_ERR_NONE;
+
+ /*
+ * Wait for the request to complete.
+ */
+ do {
+ status = wbsd_read_index(host, WBSD_IDX_STATUS);
+ } while (status & WBSD_CARDTRAFFIC);
+
+ /*
+ * Do we expect a reply?
+ */
+ if ((cmd->flags & MMC_RSP_MASK) != MMC_RSP_NONE)
+ {
+ /*
+ * Read back status.
+ */
+ isr = host->isr;
+
+ /* Card removed? */
+ if (isr & WBSD_INT_CARD)
+ cmd->error = MMC_ERR_TIMEOUT;
+ /* Timeout? */
+ else if (isr & WBSD_INT_TIMEOUT)
+ cmd->error = MMC_ERR_TIMEOUT;
+ /* CRC? */
+ else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
+ cmd->error = MMC_ERR_BADCRC;
+ /* All ok */
+ else
+ {
+ if ((cmd->flags & MMC_RSP_MASK) == MMC_RSP_SHORT)
+ wbsd_get_short_reply(host, cmd);
+ else
+ wbsd_get_long_reply(host, cmd);
+ }
+ }
+
+ DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error);
+}
+
+/*
+ * Data functions
+ */
+
+static void wbsd_empty_fifo(struct wbsd_host* host)
+{
+ struct mmc_data* data = host->mrq->cmd->data;
+ char* buffer;
+ int i, fsr, fifo;
+
+ /*
+ * Handle excessive data.
+ */
+ if (data->bytes_xfered == host->size)
+ return;
+
+ buffer = wbsd_kmap_sg(host) + host->offset;
+
+ /*
+ * Drain the fifo. This has a tendency to loop longer
+ * than the FIFO length (usually one block).
+ */
+ while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY))
+ {
+ /*
+ * The size field in the FSR is broken so we have to
+ * do some guessing.
+ */
+ if (fsr & WBSD_FIFO_FULL)
+ fifo = 16;
+ else if (fsr & WBSD_FIFO_FUTHRE)
+ fifo = 8;
+ else
+ fifo = 1;
+
+ for (i = 0;i < fifo;i++)
+ {
+ *buffer = inb(host->base + WBSD_DFR);
+ buffer++;
+ host->offset++;
+ host->remain--;
+
+ data->bytes_xfered++;
+
+ /*
+ * Transfer done?
+ */
+ if (data->bytes_xfered == host->size)
+ {
+ wbsd_kunmap_sg(host);
+ return;
+ }
+
+ /*
+ * End of scatter list entry?
+ */
+ if (host->remain == 0)
+ {
+ wbsd_kunmap_sg(host);
+
+ /*
+ * Get next entry. Check if last.
+ */
+ if (!wbsd_next_sg(host))
+ {
+ /*
+ * We should never reach this point.
+ * It means that we're trying to
+ * transfer more blocks than can fit
+ * into the scatter list.
+ */
+ BUG_ON(1);
+
+ host->size = data->bytes_xfered;
+
+ return;
+ }
+
+ buffer = wbsd_kmap_sg(host);
+ }
+ }
+ }
+
+ wbsd_kunmap_sg(host);
+
+ /*
+ * This is a very dirty hack to solve a
+ * hardware problem. The chip doesn't trigger
+ * FIFO threshold interrupts properly.
+ */
+ if ((host->size - data->bytes_xfered) < 16)
+ tasklet_schedule(&host->fifo_tasklet);
+}
+
+static void wbsd_fill_fifo(struct wbsd_host* host)
+{
+ struct mmc_data* data = host->mrq->cmd->data;
+ char* buffer;
+ int i, fsr, fifo;
+
+ /*
+ * Check that we aren't being called after the
+	 * entire buffer has been transferred.
+ */
+ if (data->bytes_xfered == host->size)
+ return;
+
+ buffer = wbsd_kmap_sg(host) + host->offset;
+
+ /*
+ * Fill the fifo. This has a tendency to loop longer
+ * than the FIFO length (usually one block).
+ */
+ while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL))
+ {
+ /*
+ * The size field in the FSR is broken so we have to
+ * do some guessing.
+ */
+ if (fsr & WBSD_FIFO_EMPTY)
+ fifo = 0;
+ else if (fsr & WBSD_FIFO_EMTHRE)
+ fifo = 8;
+ else
+ fifo = 15;
+
+ for (i = 16;i > fifo;i--)
+ {
+ outb(*buffer, host->base + WBSD_DFR);
+ buffer++;
+ host->offset++;
+ host->remain--;
+
+ data->bytes_xfered++;
+
+ /*
+ * Transfer done?
+ */
+ if (data->bytes_xfered == host->size)
+ {
+ wbsd_kunmap_sg(host);
+ return;
+ }
+
+ /*
+ * End of scatter list entry?
+ */
+ if (host->remain == 0)
+ {
+ wbsd_kunmap_sg(host);
+
+ /*
+ * Get next entry. Check if last.
+ */
+ if (!wbsd_next_sg(host))
+ {
+ /*
+ * We should never reach this point.
+ * It means that we're trying to
+ * transfer more blocks than can fit
+ * into the scatter list.
+ */
+ BUG_ON(1);
+
+ host->size = data->bytes_xfered;
+
+ return;
+ }
+
+ buffer = wbsd_kmap_sg(host);
+ }
+ }
+ }
+
+ wbsd_kunmap_sg(host);
+}
+
+static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
+{
+ u16 blksize;
+ u8 setup;
+ unsigned long dmaflags;
+
+ DBGF("blksz %04x blks %04x flags %08x\n",
+ 1 << data->blksz_bits, data->blocks, data->flags);
+ DBGF("tsac %d ms nsac %d clk\n",
+ data->timeout_ns / 1000000, data->timeout_clks);
+
+ /*
+ * Calculate size.
+ */
+ host->size = data->blocks << data->blksz_bits;
+
+ /*
+ * Check timeout values for overflow.
+ * (Yes, some cards cause this value to overflow).
+ */
+ if (data->timeout_ns > 127000000)
+ wbsd_write_index(host, WBSD_IDX_TAAC, 127);
+ else
+ wbsd_write_index(host, WBSD_IDX_TAAC, data->timeout_ns/1000000);
+
+ if (data->timeout_clks > 255)
+ wbsd_write_index(host, WBSD_IDX_NSAC, 255);
+ else
+ wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
+
+ /*
+ * Inform the chip of how large blocks will be
+ * sent. It needs this to determine when to
+ * calculate CRC.
+ *
+ * Space for CRC must be included in the size.
+ */
+ blksize = (1 << data->blksz_bits) + 2;
+
+ wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
+ wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
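+	/*
+	 * For the usual 512-byte blocks this gives blksize = 514 (0x202):
+	 * 0x20 is written to PBSMSB (size bits 11:8 in the upper nibble)
+	 * and 0x02 to PBSLSB.
+	 */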
+
+ /*
+ * Clear the FIFO. This is needed even for DMA
+ * transfers since the chip still uses the FIFO
+ * internally.
+ */
+ setup = wbsd_read_index(host, WBSD_IDX_SETUP);
+ setup |= WBSD_FIFO_RESET;
+ wbsd_write_index(host, WBSD_IDX_SETUP, setup);
+
+ /*
+ * DMA transfer?
+ */
+ if (host->dma >= 0)
+ {
+ /*
+ * The buffer for DMA is only 64 kB.
+ */
+ BUG_ON(host->size > 0x10000);
+ if (host->size > 0x10000)
+ {
+ data->error = MMC_ERR_INVALID;
+ return;
+ }
+
+ /*
+ * Transfer data from the SG list to
+ * the DMA buffer.
+ */
+ if (data->flags & MMC_DATA_WRITE)
+ wbsd_sg_to_dma(host, data);
+
+ /*
+ * Initialise the ISA DMA controller.
+ */
+ dmaflags = claim_dma_lock();
+ disable_dma(host->dma);
+ clear_dma_ff(host->dma);
+ if (data->flags & MMC_DATA_READ)
+ set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
+ else
+ set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
+ set_dma_addr(host->dma, host->dma_addr);
+ set_dma_count(host->dma, host->size);
+
+ enable_dma(host->dma);
+ release_dma_lock(dmaflags);
+
+ /*
+ * Enable DMA on the host.
+ */
+ wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
+ }
+ else
+ {
+ /*
+ * This flag is used to keep printk
+ * output to a minimum.
+ */
+ host->firsterr = 1;
+
+ /*
+ * Initialise the SG list.
+ */
+ wbsd_init_sg(host, data);
+
+ /*
+ * Turn off DMA.
+ */
+ wbsd_write_index(host, WBSD_IDX_DMA, 0);
+
+ /*
+ * Set up FIFO threshold levels (and fill
+ * buffer if doing a write).
+ */
+ if (data->flags & MMC_DATA_READ)
+ {
+ wbsd_write_index(host, WBSD_IDX_FIFOEN,
+ WBSD_FIFOEN_FULL | 8);
+ }
+ else
+ {
+ wbsd_write_index(host, WBSD_IDX_FIFOEN,
+ WBSD_FIFOEN_EMPTY | 8);
+ wbsd_fill_fifo(host);
+ }
+ }
+
+ data->error = MMC_ERR_NONE;
+}
+
+static void wbsd_finish_data(struct wbsd_host* host, struct mmc_data* data)
+{
+ unsigned long dmaflags;
+ int count;
+ u8 status;
+
+ WARN_ON(host->mrq == NULL);
+
+ /*
+ * Send a stop command if needed.
+ */
+ if (data->stop)
+ wbsd_send_command(host, data->stop);
+
+ /*
+ * Wait for the controller to leave data
+ * transfer state.
+ */
+ do
+ {
+ status = wbsd_read_index(host, WBSD_IDX_STATUS);
+ } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));
+
+ /*
+ * DMA transfer?
+ */
+ if (host->dma >= 0)
+ {
+ /*
+ * Disable DMA on the host.
+ */
+ wbsd_write_index(host, WBSD_IDX_DMA, 0);
+
+ /*
+	 * Turn off the ISA DMA controller.
+ */
+ dmaflags = claim_dma_lock();
+ disable_dma(host->dma);
+ clear_dma_ff(host->dma);
+ count = get_dma_residue(host->dma);
+ release_dma_lock(dmaflags);
+
+ /*
+ * Any leftover data?
+ */
+ if (count)
+ {
+ printk(KERN_ERR DRIVER_NAME ": Incomplete DMA "
+ "transfer. %d bytes left.\n", count);
+
+ data->error = MMC_ERR_FAILED;
+ }
+ else
+ {
+ /*
+ * Transfer data from DMA buffer to
+ * SG list.
+ */
+ if (data->flags & MMC_DATA_READ)
+ wbsd_dma_to_sg(host, data);
+
+ data->bytes_xfered = host->size;
+ }
+ }
+
+ DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);
+
+ wbsd_request_end(host, host->mrq);
+}
+
+/*
+ * MMC Callbacks
+ */
+
+static void wbsd_request(struct mmc_host* mmc, struct mmc_request* mrq)
+{
+ struct wbsd_host* host = mmc_priv(mmc);
+ struct mmc_command* cmd;
+
+ /*
+ * Disable tasklets to avoid a deadlock.
+ */
+ spin_lock_bh(&host->lock);
+
+ BUG_ON(host->mrq != NULL);
+
+ cmd = mrq->cmd;
+
+ host->mrq = mrq;
+
+ /*
+ * If there is no card in the slot then
+	 * time out immediately.
+ */
+ if (!(inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT))
+ {
+ cmd->error = MMC_ERR_TIMEOUT;
+ goto done;
+ }
+
+ /*
+ * Does the request include data?
+ */
+ if (cmd->data)
+ {
+ wbsd_prepare_data(host, cmd->data);
+
+ if (cmd->data->error != MMC_ERR_NONE)
+ goto done;
+ }
+
+ wbsd_send_command(host, cmd);
+
+ /*
+ * If this is a data transfer the request
+	 * will be finished after the data has been
+	 * transferred.
+ */
+ if (cmd->data && (cmd->error == MMC_ERR_NONE))
+ {
+ /*
+ * Dirty fix for hardware bug.
+ */
+ if (host->dma == -1)
+ tasklet_schedule(&host->fifo_tasklet);
+
+ spin_unlock_bh(&host->lock);
+
+ return;
+ }
+
+done:
+ wbsd_request_end(host, mrq);
+
+ spin_unlock_bh(&host->lock);
+}
+
+static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
+{
+ struct wbsd_host* host = mmc_priv(mmc);
+ u8 clk, setup, pwr;
+
+ DBGF("clock %uHz busmode %u powermode %u Vdd %u\n",
+ ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);
+
+ spin_lock_bh(&host->lock);
+
+ /*
+ * Reset the chip on each power off.
+ * Should clear out any weird states.
+ */
+ if (ios->power_mode == MMC_POWER_OFF)
+ wbsd_init_device(host);
+
+ if (ios->clock >= 24000000)
+ clk = WBSD_CLK_24M;
+ else if (ios->clock >= 16000000)
+ clk = WBSD_CLK_16M;
+ else if (ios->clock >= 12000000)
+ clk = WBSD_CLK_12M;
+ else
+ clk = WBSD_CLK_375K;
+
+ /*
+ * Only write to the clock register when
+ * there is an actual change.
+ */
+ if (clk != host->clk)
+ {
+ wbsd_write_index(host, WBSD_IDX_CLK, clk);
+ host->clk = clk;
+ }
+
+ if (ios->power_mode != MMC_POWER_OFF)
+ {
+ /*
+ * Power up card.
+ */
+ pwr = inb(host->base + WBSD_CSR);
+ pwr &= ~WBSD_POWER_N;
+ outb(pwr, host->base + WBSD_CSR);
+
+ /*
+ * This behaviour is stolen from the
+ * Windows driver. Don't know why, but
+ * it is needed.
+ */
+ setup = wbsd_read_index(host, WBSD_IDX_SETUP);
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ setup |= WBSD_DAT3_H;
+ else
+ setup &= ~WBSD_DAT3_H;
+ wbsd_write_index(host, WBSD_IDX_SETUP, setup);
+
+ mdelay(1);
+ }
+
+ spin_unlock_bh(&host->lock);
+}
+
+/*
+ * Tasklets
+ */
+
+static inline struct mmc_data* wbsd_get_data(struct wbsd_host* host)
+{
+ WARN_ON(!host->mrq);
+ if (!host->mrq)
+ return NULL;
+
+ WARN_ON(!host->mrq->cmd);
+ if (!host->mrq->cmd)
+ return NULL;
+
+ WARN_ON(!host->mrq->cmd->data);
+ if (!host->mrq->cmd->data)
+ return NULL;
+
+ return host->mrq->cmd->data;
+}
+
+static void wbsd_tasklet_card(unsigned long param)
+{
+ struct wbsd_host* host = (struct wbsd_host*)param;
+ u8 csr;
+
+ spin_lock(&host->lock);
+
+ csr = inb(host->base + WBSD_CSR);
+ WARN_ON(csr == 0xff);
+
+ if (csr & WBSD_CARDPRESENT)
+ DBG("Card inserted\n");
+ else
+ {
+ DBG("Card removed\n");
+
+ if (host->mrq)
+ {
+ printk(KERN_ERR DRIVER_NAME
+ ": Card removed during transfer!\n");
+ wbsd_reset(host);
+
+ host->mrq->cmd->error = MMC_ERR_FAILED;
+ tasklet_schedule(&host->finish_tasklet);
+ }
+ }
+
+ /*
+ * Unlock first since we might get a call back.
+ */
+ spin_unlock(&host->lock);
+
+ mmc_detect_change(host->mmc);
+}
+
+static void wbsd_tasklet_fifo(unsigned long param)
+{
+ struct wbsd_host* host = (struct wbsd_host*)param;
+ struct mmc_data* data;
+
+ spin_lock(&host->lock);
+
+ if (!host->mrq)
+ goto end;
+
+ data = wbsd_get_data(host);
+ if (!data)
+ goto end;
+
+ if (data->flags & MMC_DATA_WRITE)
+ wbsd_fill_fifo(host);
+ else
+ wbsd_empty_fifo(host);
+
+ /*
+ * Done?
+ */
+ if (host->size == data->bytes_xfered)
+ {
+ wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
+ tasklet_schedule(&host->finish_tasklet);
+ }
+
+end:
+ spin_unlock(&host->lock);
+}
+
+static void wbsd_tasklet_crc(unsigned long param)
+{
+ struct wbsd_host* host = (struct wbsd_host*)param;
+ struct mmc_data* data;
+
+ spin_lock(&host->lock);
+
+ if (!host->mrq)
+ goto end;
+
+ data = wbsd_get_data(host);
+ if (!data)
+ goto end;
+
+ DBGF("CRC error\n");
+
+ data->error = MMC_ERR_BADCRC;
+
+ tasklet_schedule(&host->finish_tasklet);
+
+end:
+ spin_unlock(&host->lock);
+}
+
+static void wbsd_tasklet_timeout(unsigned long param)
+{
+ struct wbsd_host* host = (struct wbsd_host*)param;
+ struct mmc_data* data;
+
+ spin_lock(&host->lock);
+
+ if (!host->mrq)
+ goto end;
+
+ data = wbsd_get_data(host);
+ if (!data)
+ goto end;
+
+ DBGF("Timeout\n");
+
+ data->error = MMC_ERR_TIMEOUT;
+
+ tasklet_schedule(&host->finish_tasklet);
+
+end:
+ spin_unlock(&host->lock);
+}
+
+static void wbsd_tasklet_finish(unsigned long param)
+{
+ struct wbsd_host* host = (struct wbsd_host*)param;
+ struct mmc_data* data;
+
+ spin_lock(&host->lock);
+
+ WARN_ON(!host->mrq);
+ if (!host->mrq)
+ goto end;
+
+ data = wbsd_get_data(host);
+ if (!data)
+ goto end;
+
+ wbsd_finish_data(host, data);
+
+end:
+ spin_unlock(&host->lock);
+}
+
+static void wbsd_tasklet_block(unsigned long param)
+{
+ struct wbsd_host* host = (struct wbsd_host*)param;
+ struct mmc_data* data;
+
+ spin_lock(&host->lock);
+
+ if ((wbsd_read_index(host, WBSD_IDX_CRCSTATUS) & WBSD_CRC_MASK) !=
+ WBSD_CRC_OK)
+ {
+ data = wbsd_get_data(host);
+ if (!data)
+ goto end;
+
+ DBGF("CRC error\n");
+
+ data->error = MMC_ERR_BADCRC;
+
+ tasklet_schedule(&host->finish_tasklet);
+ }
+
+end:
+ spin_unlock(&host->lock);
+}
+
+/*
+ * Interrupt handling
+ */
+
+static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct wbsd_host* host = dev_id;
+ int isr;
+
+ isr = inb(host->base + WBSD_ISR);
+
+ /*
+ * Was it actually our hardware that caused the interrupt?
+ */
+ if (isr == 0xff || isr == 0x00)
+ return IRQ_NONE;
+
+ host->isr |= isr;
+
+ /*
+ * Schedule tasklets as needed.
+ */
+ if (isr & WBSD_INT_CARD)
+ tasklet_schedule(&host->card_tasklet);
+ if (isr & WBSD_INT_FIFO_THRE)
+ tasklet_schedule(&host->fifo_tasklet);
+ if (isr & WBSD_INT_CRC)
+ tasklet_hi_schedule(&host->crc_tasklet);
+ if (isr & WBSD_INT_TIMEOUT)
+ tasklet_hi_schedule(&host->timeout_tasklet);
+ if (isr & WBSD_INT_BUSYEND)
+ tasklet_hi_schedule(&host->block_tasklet);
+ if (isr & WBSD_INT_TC)
+ tasklet_schedule(&host->finish_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Support functions for probe
+ */
+
+static int wbsd_scan(struct wbsd_host* host)
+{
+ int i, j, k;
+ int id;
+
+ /*
+ * Iterate through all ports, all codes to
+ * find hardware that is in our known list.
+ */
+ for (i = 0;i < sizeof(config_ports)/sizeof(int);i++)
+ {
+ if (!request_region(config_ports[i], 2, DRIVER_NAME))
+ continue;
+
+ for (j = 0;j < sizeof(unlock_codes)/sizeof(int);j++)
+ {
+ id = 0xFFFF;
+
+ outb(unlock_codes[j], config_ports[i]);
+ outb(unlock_codes[j], config_ports[i]);
+
+ outb(WBSD_CONF_ID_HI, config_ports[i]);
+ id = inb(config_ports[i] + 1) << 8;
+
+ outb(WBSD_CONF_ID_LO, config_ports[i]);
+ id |= inb(config_ports[i] + 1);
+
+ for (k = 0;k < sizeof(valid_ids)/sizeof(int);k++)
+ {
+ if (id == valid_ids[k])
+ {
+ host->chip_id = id;
+ host->config = config_ports[i];
+					host->unlock_code = unlock_codes[j];
+
+ return 0;
+ }
+ }
+
+ if (id != 0xFFFF)
+ {
+ DBG("Unknown hardware (id %x) found at %x\n",
+ id, config_ports[i]);
+ }
+
+ outb(LOCK_CODE, config_ports[i]);
+ }
+
+ release_region(config_ports[i], 2);
+ }
+
+ return -ENODEV;
+}
+
+static int wbsd_request_regions(struct wbsd_host* host)
+{
+ if (io & 0x7)
+ return -EINVAL;
+
+ if (!request_region(io, 8, DRIVER_NAME))
+ return -EIO;
+
+ host->base = io;
+
+ return 0;
+}
+
+static void wbsd_release_regions(struct wbsd_host* host)
+{
+ if (host->base)
+ release_region(host->base, 8);
+
+ if (host->config)
+ release_region(host->config, 2);
+}
+
+static void wbsd_init_dma(struct wbsd_host* host)
+{
+ host->dma = -1;
+
+ if (dma < 0)
+ return;
+
+ if (request_dma(dma, DRIVER_NAME))
+ goto err;
+
+ /*
+ * We need to allocate a special buffer in
+ * order for ISA to be able to DMA to it.
+ */
+ host->dma_buffer = kmalloc(65536,
+ GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
+ if (!host->dma_buffer)
+ goto free;
+
+ /*
+ * Translate the address to a physical address.
+ */
+ host->dma_addr = isa_virt_to_bus(host->dma_buffer);
+
+ /*
+ * ISA DMA must be aligned on a 64k basis.
+ */
+ if ((host->dma_addr & 0xffff) != 0)
+ goto kfree;
+ /*
+ * ISA cannot access memory above 16 MB.
+ */
+ else if (host->dma_addr >= 0x1000000)
+ goto kfree;
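+	/*
+	 * Both checks exist because of the ISA DMA hardware: the 8237's
+	 * address counter cannot carry across a 64 kB boundary, and the
+	 * ISA bus only has 24 address lines, hence the 16 MB ceiling.
+	 */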
+
+ host->dma = dma;
+
+ return;
+
+kfree:
+ /*
+ * If we've gotten here then there is some kind of alignment bug
+ */
+ BUG_ON(1);
+
+ kfree(host->dma_buffer);
+ host->dma_buffer = NULL;
+
+free:
+ free_dma(dma);
+
+err:
+ printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
+ "Falling back on FIFO.\n", dma);
+}
+
+static struct mmc_host_ops wbsd_ops = {
+ .request = wbsd_request,
+ .set_ios = wbsd_set_ios,
+};
+
+/*
+ * Device probe
+ */
+
+static int wbsd_probe(struct device* dev)
+{
+ struct wbsd_host* host = NULL;
+ struct mmc_host* mmc = NULL;
+ int ret;
+
+ /*
+ * Allocate MMC structure.
+ */
+ mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ /*
+ * Scan for hardware.
+ */
+ ret = wbsd_scan(host);
+ if (ret)
+ goto freemmc;
+
+ /*
+ * Reset the chip.
+ */
+ wbsd_write_config(host, WBSD_CONF_SWRST, 1);
+ wbsd_write_config(host, WBSD_CONF_SWRST, 0);
+
+ /*
+ * Allocate I/O ports.
+ */
+ ret = wbsd_request_regions(host);
+ if (ret)
+ goto release;
+
+ /*
+ * Set host parameters.
+ */
+ mmc->ops = &wbsd_ops;
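+	/*
+	 * The clock divider provides rates from 375kHz up to 24MHz
+	 * (see the WBSD_CLK_* definitions).
+	 */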
+ mmc->f_min = 375000;
+ mmc->f_max = 24000000;
+ mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
+
+ spin_lock_init(&host->lock);
+
+ /*
+ * Select SD/MMC function.
+ */
+ wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
+
+ /*
+ * Set up card detection.
+ */
+ wbsd_write_config(host, WBSD_CONF_PINS, 0x02);
+
+ /*
+ * Configure I/O port.
+ */
+ wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
+ wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);
+
+ /*
+ * Allocate interrupt.
+ */
+ ret = request_irq(irq, wbsd_irq, SA_SHIRQ, DRIVER_NAME, host);
+ if (ret)
+ goto release;
+
+ host->irq = irq;
+
+ /*
+ * Set up tasklets.
+ */
+	tasklet_init(&host->card_tasklet, wbsd_tasklet_card,
+		(unsigned long)host);
+	tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo,
+		(unsigned long)host);
+	tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc,
+		(unsigned long)host);
+	tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout,
+		(unsigned long)host);
+	tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish,
+		(unsigned long)host);
+	tasklet_init(&host->block_tasklet, wbsd_tasklet_block,
+		(unsigned long)host);
+
+ /*
+ * Configure interrupt.
+ */
+ wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);
+
+ /*
+ * Allocate DMA.
+ */
+ wbsd_init_dma(host);
+
+ /*
+ * If all went well, then configure DMA.
+ */
+ if (host->dma >= 0)
+ wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);
+
+ /*
+ * Maximum number of segments. Worst case is one sector per segment
+ * so this will be 64kB/512.
+ */
+ mmc->max_hw_segs = 128;
+ mmc->max_phys_segs = 128;
+
+ /*
+ * Maximum number of sectors in one transfer. Also limited by 64kB
+ * buffer.
+ */
+ mmc->max_sectors = 128;
+
+	/*
+	 * Maximum segment size. A single segment may cover the entire
+	 * transfer, i.e. max_sectors * 512 bytes (64kB).
+	 */
+ mmc->max_seg_size = mmc->max_sectors * 512;
+
+ /*
+ * Enable chip.
+ */
+ wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
+
+ /*
+ * Power up chip.
+ */
+ wbsd_write_config(host, WBSD_CONF_POWER, 0x20);
+
+ /*
+ * Power Management stuff. No idea how this works.
+ * Not tested.
+ */
+#ifdef CONFIG_PM
+ wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
+#endif
+
+ /*
+ * Reset the chip into a known state.
+ */
+ wbsd_init_device(host);
+
+ dev_set_drvdata(dev, mmc);
+
+ /*
+ * Add host to MMC layer.
+ */
+ mmc_add_host(mmc);
+
+ printk(KERN_INFO "%s: W83L51xD id %x at 0x%x irq %d dma %d\n",
+ mmc->host_name, (int)host->chip_id, (int)host->base,
+ (int)host->irq, (int)host->dma);
+
+ return 0;
+
+release:
+ wbsd_release_regions(host);
+
+freemmc:
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+/*
+ * Device remove
+ */
+
+static int wbsd_remove(struct device* dev)
+{
+ struct mmc_host* mmc = dev_get_drvdata(dev);
+ struct wbsd_host* host;
+
+ if (!mmc)
+ return 0;
+
+ host = mmc_priv(mmc);
+
+ /*
+ * Unregister host with MMC layer.
+ */
+ mmc_remove_host(mmc);
+
+ /*
+ * Power down the SD/MMC function.
+ */
+ wbsd_unlock_config(host);
+ wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
+ wbsd_write_config(host, WBSD_CONF_ENABLE, 0);
+ wbsd_lock_config(host);
+
+ /*
+ * Free resources.
+ */
+	kfree(host->dma_buffer);
+
+ if (host->dma >= 0)
+ free_dma(host->dma);
+
+ free_irq(host->irq, host);
+
+ tasklet_kill(&host->card_tasklet);
+ tasklet_kill(&host->fifo_tasklet);
+ tasklet_kill(&host->crc_tasklet);
+ tasklet_kill(&host->timeout_tasklet);
+ tasklet_kill(&host->finish_tasklet);
+ tasklet_kill(&host->block_tasklet);
+
+ wbsd_release_regions(host);
+
+ mmc_free_host(mmc);
+
+ return 0;
+}
+
+/*
+ * Power management
+ */
+
+#ifdef CONFIG_PM
+static int wbsd_suspend(struct device *dev, u32 state, u32 level)
+{
+ DBGF("Not yet supported\n");
+
+ return 0;
+}
+
+static int wbsd_resume(struct device *dev, u32 level)
+{
+ DBGF("Not yet supported\n");
+
+ return 0;
+}
+#else
+#define wbsd_suspend NULL
+#define wbsd_resume NULL
+#endif
+
+static void wbsd_release(struct device *dev)
+{
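+	/*
+	 * Nothing to free here; the platform device is statically
+	 * allocated. An empty release function is still needed to keep
+	 * the driver core from complaining when it is unregistered.
+	 */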
+}
+
+static struct platform_device wbsd_device = {
+ .name = DRIVER_NAME,
+ .id = -1,
+ .dev = {
+ .release = wbsd_release,
+ },
+};
+
+static struct device_driver wbsd_driver = {
+ .name = DRIVER_NAME,
+ .bus = &platform_bus_type,
+ .probe = wbsd_probe,
+ .remove = wbsd_remove,
+
+ .suspend = wbsd_suspend,
+ .resume = wbsd_resume,
+};
+
+/*
+ * Module loading/unloading
+ */
+
+static int __init wbsd_drv_init(void)
+{
+ int result;
+
+ printk(KERN_INFO DRIVER_NAME
+ ": Winbond W83L51xD SD/MMC card interface driver, "
+ DRIVER_VERSION "\n");
+ printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
+
+ result = driver_register(&wbsd_driver);
+ if (result < 0)
+ return result;
+
+	result = platform_device_register(&wbsd_device);
+	if (result < 0)
+	{
+		driver_unregister(&wbsd_driver);
+		return result;
+	}
+
+ return 0;
+}
+
+static void __exit wbsd_drv_exit(void)
+{
+ platform_device_unregister(&wbsd_device);
+
+ driver_unregister(&wbsd_driver);
+
+ DBG("unloaded\n");
+}
+
+module_init(wbsd_drv_init);
+module_exit(wbsd_drv_exit);
+module_param(io, uint, 0444);
+module_param(irq, uint, 0444);
+module_param(dma, int, 0444);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");
+MODULE_VERSION(DRIVER_VERSION);
+
+MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
+MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
+MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");
diff --git a/drivers/mmc/wbsd.h b/drivers/mmc/wbsd.h
new file mode 100644
index 00000000000..fdc03b56a81
--- /dev/null
+++ b/drivers/mmc/wbsd.h
@@ -0,0 +1,178 @@
+/*
+ * linux/drivers/mmc/wbsd.h - Winbond W83L51xD SD/MMC driver
+ *
+ * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+const int config_ports[] = { 0x2E, 0x4E };
+const int unlock_codes[] = { 0x83, 0x87 };
+
+const int valid_ids[] = {
+	0x7112,
+};
+
+#define LOCK_CODE 0xAA
+
+#define WBSD_CONF_SWRST 0x02
+#define WBSD_CONF_DEVICE 0x07
+#define WBSD_CONF_ID_HI 0x20
+#define WBSD_CONF_ID_LO 0x21
+#define WBSD_CONF_POWER 0x22
+#define WBSD_CONF_PME 0x23
+#define WBSD_CONF_PMES 0x24
+
+#define WBSD_CONF_ENABLE 0x30
+#define WBSD_CONF_PORT_HI 0x60
+#define WBSD_CONF_PORT_LO 0x61
+#define WBSD_CONF_IRQ 0x70
+#define WBSD_CONF_DRQ 0x74
+
+#define WBSD_CONF_PINS 0xF0
+
+#define DEVICE_SD 0x03
+
+#define WBSD_CMDR 0x00
+#define WBSD_DFR 0x01
+#define WBSD_EIR 0x02
+#define WBSD_ISR 0x03
+#define WBSD_FSR 0x04
+#define WBSD_IDXR 0x05
+#define WBSD_DATAR 0x06
+#define WBSD_CSR 0x07
+
+#define WBSD_EINT_CARD 0x40
+#define WBSD_EINT_FIFO_THRE 0x20
+#define WBSD_EINT_CCRC 0x10
+#define WBSD_EINT_TIMEOUT 0x08
+#define WBSD_EINT_PROGEND 0x04
+#define WBSD_EINT_CRC 0x02
+#define WBSD_EINT_TC 0x01
+
+#define WBSD_INT_PENDING 0x80
+#define WBSD_INT_CARD 0x40
+#define WBSD_INT_FIFO_THRE 0x20
+#define WBSD_INT_CRC 0x10
+#define WBSD_INT_TIMEOUT 0x08
+#define WBSD_INT_PROGEND 0x04
+#define WBSD_INT_BUSYEND 0x02
+#define WBSD_INT_TC 0x01
+
+#define WBSD_FIFO_EMPTY 0x80
+#define WBSD_FIFO_FULL 0x40
+#define WBSD_FIFO_EMTHRE 0x20
+#define WBSD_FIFO_FUTHRE 0x10
+#define WBSD_FIFO_SZMASK 0x0F
+
+#define WBSD_MSLED 0x20
+#define WBSD_POWER_N 0x10
+#define WBSD_WRPT 0x04
+#define WBSD_CARDPRESENT 0x01
+
+#define WBSD_IDX_CLK 0x01
+#define WBSD_IDX_PBSMSB 0x02
+#define WBSD_IDX_TAAC 0x03
+#define WBSD_IDX_NSAC 0x04
+#define WBSD_IDX_PBSLSB 0x05
+#define WBSD_IDX_SETUP 0x06
+#define WBSD_IDX_DMA 0x07
+#define WBSD_IDX_FIFOEN 0x08
+#define WBSD_IDX_STATUS 0x10
+#define WBSD_IDX_RSPLEN 0x1E
+#define WBSD_IDX_RESP0 0x1F
+#define WBSD_IDX_RESP1 0x20
+#define WBSD_IDX_RESP2 0x21
+#define WBSD_IDX_RESP3 0x22
+#define WBSD_IDX_RESP4 0x23
+#define WBSD_IDX_RESP5 0x24
+#define WBSD_IDX_RESP6 0x25
+#define WBSD_IDX_RESP7 0x26
+#define WBSD_IDX_RESP8 0x27
+#define WBSD_IDX_RESP9 0x28
+#define WBSD_IDX_RESP10 0x29
+#define WBSD_IDX_RESP11 0x2A
+#define WBSD_IDX_RESP12 0x2B
+#define WBSD_IDX_RESP13 0x2C
+#define WBSD_IDX_RESP14 0x2D
+#define WBSD_IDX_RESP15 0x2E
+#define WBSD_IDX_RESP16 0x2F
+#define WBSD_IDX_CRCSTATUS 0x30
+#define WBSD_IDX_ISR 0x3F
+
+#define WBSD_CLK_375K 0x00
+#define WBSD_CLK_12M 0x01
+#define WBSD_CLK_16M 0x02
+#define WBSD_CLK_24M 0x03
+
+#define WBSD_DAT3_H 0x08
+#define WBSD_FIFO_RESET 0x04
+#define WBSD_SOFT_RESET 0x02
+#define WBSD_INC_INDEX 0x01
+
+#define WBSD_DMA_SINGLE 0x02
+#define WBSD_DMA_ENABLE 0x01
+
+#define WBSD_FIFOEN_EMPTY 0x20
+#define WBSD_FIFOEN_FULL 0x10
+#define WBSD_FIFO_THREMASK 0x0F
+
+#define WBSD_BLOCK_READ 0x80
+#define WBSD_BLOCK_WRITE 0x40
+#define WBSD_BUSY 0x20
+#define WBSD_CARDTRAFFIC 0x04
+#define WBSD_SENDCMD 0x02
+#define WBSD_RECVRES 0x01
+
+#define WBSD_RSP_SHORT 0x00
+#define WBSD_RSP_LONG 0x01
+
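+/*
+ * CRC status token sent back by the card on the data line after a
+ * written block: start bit (S), three status bits, end bit (E).
+ * 010 means the data was accepted, 101 indicates a CRC error.
+ */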
+#define WBSD_CRC_MASK 0x1F
+#define WBSD_CRC_OK 0x05 /* S010E (00101) */
+#define WBSD_CRC_FAIL 0x0B /* S101E (01011) */
+
+
+struct wbsd_host
+{
+ struct mmc_host* mmc; /* MMC structure */
+
+	spinlock_t		lock;		/* Protects host state */
+
+ struct mmc_request* mrq; /* Current request */
+
+ u8 isr; /* Accumulated ISR */
+
+ struct scatterlist* cur_sg; /* Current SG entry */
+ unsigned int num_sg; /* Number of entries left */
+ void* mapped_sg; /* vaddr of mapped sg */
+
+ unsigned int offset; /* Offset into current entry */
+	unsigned int		remain;		/* Data left in current entry */
+
+ int size; /* Total size of transfer */
+
+ char* dma_buffer; /* ISA DMA buffer */
+ dma_addr_t dma_addr; /* Physical address for same */
+
+ int firsterr; /* See fifo functions */
+
+ u8 clk; /* Current clock speed */
+
+ int config; /* Config port */
+ u8 unlock_code; /* Code to unlock config */
+
+ int chip_id; /* ID of controller */
+
+ int base; /* I/O port base */
+ int irq; /* Interrupt */
+ int dma; /* DMA channel */
+
+ struct tasklet_struct card_tasklet; /* Tasklet structures */
+ struct tasklet_struct fifo_tasklet;
+ struct tasklet_struct crc_tasklet;
+ struct tasklet_struct timeout_tasklet;
+ struct tasklet_struct finish_tasklet;
+ struct tasklet_struct block_tasklet;
+};